/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}
	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}
	return (cli->max_xmit - (smb_size+32)) & ~1023;
}
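/*
 * Worked example (illustrative only): assuming a typical max_xmit of 16644
 * and smb_size of 39, the fallback branch above yields
 *
 *	(16644 - (39 + 32)) & ~1023  ==  16573 & ~1023  ==  16384
 *
 * i.e. the largest multiple of 1024 that still fits into max_xmit with room
 * for the SMB header.
 */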
/*
 * Send a read&x request
 */

struct async_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				     struct cli_state *cli, int fnum,
				     off_t offset, size_t size)
{
	struct async_req *result;
	struct cli_request *req;
	bool bigoffset = False;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	result = cli_request_new(mem_ctx, cli->event_ctx, cli, 12, 0, &req);
	if (result == NULL) {
		DEBUG(0, ("cli_request_new failed\n"));
		return NULL;
	}

	req->data.read.ofs = offset;
	req->data.read.size = size;
	req->data.read.received = 0;
	req->data.read.rcvbuf = NULL;

	if ((SMB_BIG_UINT)offset >> 32)
		bigoffset = True;

	cli_set_message(req->outbuf, bigoffset ? 12 : 10, 0, False);

	SCVAL(req->outbuf,smb_com,SMBreadX);
	SSVAL(req->outbuf,smb_tid,cli->cnum);
	cli_setup_packet_buf(cli, req->outbuf);

	SCVAL(req->outbuf,smb_vwv0,0xFF);
	SCVAL(req->outbuf,smb_vwv0+1,0);
	SSVAL(req->outbuf,smb_vwv1,0);
	SSVAL(req->outbuf,smb_vwv2,fnum);
	SIVAL(req->outbuf,smb_vwv3,offset);
	SSVAL(req->outbuf,smb_vwv5,size);
	SSVAL(req->outbuf,smb_vwv6,size);
	SSVAL(req->outbuf,smb_vwv7,(size >> 16));
	SSVAL(req->outbuf,smb_vwv8,0);
	SSVAL(req->outbuf,smb_vwv9,0);
	SSVAL(req->outbuf,smb_mid,req->mid);

	if (bigoffset) {
		SIVAL(req->outbuf, smb_vwv10,
		      (((SMB_BIG_UINT)offset)>>32) & 0xffffffff);
	}

	cli_calculate_sign_mac(cli, req->outbuf);

	event_fd_set_writeable(cli->fd_event);

	if (cli_encryption_on(cli)) {
		NTSTATUS status;
		char *enc_buf;

		status = cli_encrypt_message(cli, req->outbuf, &enc_buf);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("Error in encrypting client message. "
				  "Error %s\n", nt_errstr(status)));
			TALLOC_FREE(req);
			return NULL;
		}
		req->outbuf = enc_buf;
		req->enc_state = cli->trans_enc_state;
	}

	return result;
}
/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away
 * before you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so
 * do not talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct async_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_request *cli_req = cli_request_get(req);
	NTSTATUS status;
	size_t size;

	SMB_ASSERT(req->state >= ASYNC_REQ_DONE);
	if (req->state == ASYNC_REQ_ERROR) {
		return req->status;
	}

	status = cli_pull_error(cli_req->inbuf);

	if (NT_STATUS_IS_ERR(status)) {
		return status;
	}

	/* size is the number of bytes the server returned.
	 * It can be less than the requested size. */
	size = SVAL(cli_req->inbuf, smb_vwv5);
	size |= (((unsigned int)(SVAL(cli_req->inbuf, smb_vwv7))) << 16);

	if (size > cli_req->data.read.size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		return NT_STATUS_UNEXPECTED_IO_ERROR;
	}

	*rcvbuf = (uint8_t *)
		(smb_base(cli_req->inbuf) + SVAL(cli_req->inbuf, smb_vwv6));
	*received = size;

	return NT_STATUS_OK;
}
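/*
 * Minimal usage sketch (illustrative only, error handling elided; mem_ctx,
 * mybuf, nread, rcvbuf and status are placeholders). Note the copy before
 * the request is freed, as required by the comment above:
 *
 *	struct async_req *r = cli_read_andx_send(mem_ctx, cli, fnum, 0, 4096);
 *	while (r->state < ASYNC_REQ_DONE) {
 *		event_loop_once(cli->event_ctx);
 *	}
 *	status = cli_read_andx_recv(r, &nread, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(mybuf, rcvbuf, nread);
 *	}
 *	TALLOC_FREE(r);
 */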
struct cli_readall_state {
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct async_req *subreq);

static struct async_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					  struct cli_state *cli,
					  uint16_t fnum,
					  off_t offset, size_t size)
{
	struct async_req *req, *subreq;
	struct cli_readall_state *state;

	req = async_req_new(mem_ctx, cli->event_ctx);
	if (req == NULL) {
		return NULL;
	}
	state = talloc(req, struct cli_readall_state);
	if (state == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	req->private_data = state;

	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, cli, fnum, offset, size);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	subreq->async.fn = cli_readall_done;
	subreq->async.priv = req;
	return req;
}
static void cli_readall_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct cli_readall_state *state = talloc_get_type_abort(
		req->private_data, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (received == 0) {
		/* EOF */
		async_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		async_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (async_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		async_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = cli_readall_done;
	subreq->async.priv = req;
}
static NTSTATUS cli_readall_recv(struct async_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = talloc_get_type_abort(
		req->private_data, struct cli_readall_state);

	SMB_ASSERT(req->state >= ASYNC_REQ_DONE);
	if (req->state == ASYNC_REQ_ERROR) {
		return req->status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
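/*
 * cli_readall_send/_recv keep re-issuing read&x requests until either "size"
 * bytes have arrived or the server signals EOF with a zero-byte reply, so a
 * caller sees at most one contiguous buffer. As with cli_read_andx_recv,
 * that buffer hangs off the request and has to be copied away before the
 * request is talloc_free'd.
 */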
/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct cli_pull_subreq {
	struct async_req *req;
	ssize_t received;
	uint8_t *buf;
};

struct cli_pull_state {
	struct async_req *req;

	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};
static char *cli_pull_print(TALLOC_CTX *mem_ctx, struct async_req *req)
{
	struct cli_pull_state *state = talloc_get_type_abort(
		req->private_data, struct cli_pull_state);
	char *result;

	result = async_req_print(mem_ctx, req);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct async_req *read_req);

/*
 * Prepare an async pull request
 */
struct async_req *cli_pull_send(TALLOC_CTX *mem_ctx, struct cli_state *cli,
				uint16_t fnum, off_t start_offset,
				SMB_OFF_T size, size_t window_size,
				NTSTATUS (*sink)(char *buf, size_t n,
						 void *priv),
				void *priv)
{
	struct async_req *result;
	struct cli_pull_state *state;
	int i;

	result = async_req_new(mem_ctx, cli->event_ctx);
	if (result == NULL) {
		goto failed;
	}
	state = talloc(result, struct cli_pull_state);
	if (state == NULL) {
		goto failed;
	}
	result->private_data = state;
	result->print = cli_pull_print;
	state->req = result;

	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		if (!async_post_status(result, NT_STATUS_OK)) {
			goto failed;
		}
		return result;
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		state->reqs[i].req = cli_readall_send(
			state->reqs, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (state->reqs[i].req == NULL) {
			goto failed;
		}

		state->reqs[i].req->async.fn = cli_pull_read_done;
		state->reqs[i].req->async.priv = result;

		state->requested += request_thistime;
	}
	return result;

 failed:
	TALLOC_FREE(result);
	return NULL;
}
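/*
 * Sizing example (illustrative numbers): with a window_size of 256 KiB, a
 * chunk_size of 16384 bytes from cli_read_max_bufsize() and max_mux of 50,
 * 256*1024/16384 = 16 read&x requests are kept in flight.
 *
 * Minimal asynchronous usage sketch (placeholder variables, error handling
 * elided), mirroring what cli_pull() below does synchronously:
 *
 *	req = cli_pull_send(mem_ctx, cli, fnum, 0, file_size, 1024*1024,
 *			    my_sink, my_sink_data);
 *	while (req->state < ASYNC_REQ_DONE) {
 *		event_loop_once(cli->event_ctx);
 *	}
 *	status = cli_pull_recv(req, &nread);
 */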
/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct async_req *read_req)
{
	struct async_req *pull_req = talloc_get_type_abort(
		read_req->async.priv, struct async_req);
	struct cli_pull_state *state = talloc_get_type_abort(
		pull_req->private_data, struct cli_pull_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;
	int i;

	status = cli_readall_recv(read_req, &received, &buf);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(state->req, status);
		return;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (state->reqs[i].req == read_req) {
			break;
		}
	}

	if (i == state->num_reqs) {
		/* Got something we did not send. Just drop it. */
		TALLOC_FREE(read_req);
		return;
	}

	state->reqs[i].received = received;
	state->reqs[i].buf = buf;

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests that we have.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_read;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		if (state->reqs[state->top_req].req->state < ASYNC_REQ_DONE) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		top_read = &state->reqs[state->top_req];

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_read->received,
			   (int)state->pushed));

		status = state->sink((char *)top_read->buf, top_read->received,
				     state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(state->req, status);
			return;
		}
		state->pushed += top_read->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct async_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->cli, state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (async_req_nomem(new_req, state->req)) {
				return;
			}

			new_req->async.fn = cli_pull_read_done;
			new_req->async.priv = pull_req;

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	async_req_done(pull_req);
}
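/*
 * Example of the reordering above, with num_reqs == 3: if the replies for
 * chunks 0, 1 and 2 arrive in the order 2, 0, 1, the completion for chunk 2
 * finds top_req == 0 still pending and returns without pushing. When chunk 0
 * completes, chunk 0 is pushed and top_req advances to 1, which is still
 * pending, so the loop stops again. When chunk 1 finally arrives, chunks 1
 * and 2 are pushed back to back, keeping the data handed to "sink" strictly
 * in file order.
 */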
NTSTATUS cli_pull_recv(struct async_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = talloc_get_type_abort(
		req->private_data, struct cli_pull_state);

	SMB_ASSERT(req->state >= ASYNC_REQ_DONE);
	if (req->state == ASYNC_REQ_ERROR) {
		return req->status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}
NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct async_req *req;
	NTSTATUS result = NT_STATUS_NO_MEMORY;

	if (cli_tmp_event_ctx(frame, cli) == NULL) {
		goto nomem;
	}

	req = cli_pull_send(frame, cli, fnum, start_offset, size, window_size,
			    sink, priv);
	if (req == NULL) {
		goto nomem;
	}

	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(cli->event_ctx);
	}

	result = cli_pull_recv(req, received);

 nomem:
	TALLOC_FREE(frame);
	return result;
}
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, int fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}
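/*
 * Usage sketch (illustrative; the buffer and fnum are placeholders):
 *
 *	char data[4096];
 *	ssize_t n = cli_read(cli, fnum, data, 0, sizeof(data));
 *
 * On failure cli_read() returns -1 after recording the NTSTATUS on the
 * cli_state via cli_set_error().
 */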
/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    int fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size,
			    int i)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, this following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((SMB_BIG_UINT)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	SSVAL(cli->outbuf,smb_mid,cli->mid + i);

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	}
	return cli_send_smb(cli);
}
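/*
 * Size-field example: the write length is split across two 16-bit words
 * above. For an (illustrative) size of 0x12345 bytes, smb_vwv10 carries the
 * low word 0x2345 and smb_vwv9 the high word 0x0001; the receiver
 * reassembles it as (vwv9 << 16) | vwv10, mirroring the read path.
 */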
/****************************************************************************
  write to a file
  write_mode: 0x0001 disallow write cacheing
              0x0002 return bytes remaining
              0x0004 use raw named pipe protocol
              0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  int fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if (cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	/* Default (small) writesize. */
	writesize = (cli->max_xmit - (smb_size+32)) & ~1023;

	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		writesize = CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	} else if ((cli->capabilities & CAP_LARGE_WRITEX) &&
		   (strcmp(cli->dev, "LPT1:") != 0)) {

		/* Printer devices are restricted to max_xmit
		 * writesize in Vista and XPSP3. */

		if (cli->is_samba) {
			writesize = CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
		} else if (!client_is_signing_on(cli)) {
			/* Windows restricts signed writes to max_xmit.
			 * Found by Volker. */
			writesize = CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
		}
	}

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					     write_mode,
					     buf + bsent,
					     size1, issued))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}
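/*
 * Usage sketch (placeholder data, default write mode):
 *
 *	const char msg[] = "hello";
 *	ssize_t n = cli_write(cli, fnum, 0, msg, 0, sizeof(msg)-1);
 *
 * A write_mode of 0 keeps the large-write paths above available; the
 * 0x0001-0x0008 bits listed in the comment before cli_write() select the
 * caching and named-pipe message modes instead.
 */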
/****************************************************************************
  write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     int fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;	/* Data block identifier. */
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);