/*
   Unix SMB/CIFS implementation.
   Infrastructure for async SMB client requests
   Copyright (C) Volker Lendecke 2008

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
23 * Fetch an error out of a NBT packet
24 * @param[in] buf The SMB packet
25 * @retval The error, converted to NTSTATUS
28 NTSTATUS
cli_pull_error(char *buf
)
30 uint32_t flags2
= SVAL(buf
, smb_flg2
);
32 if (flags2
& FLAGS2_32_BIT_ERROR_CODES
) {
33 return NT_STATUS(IVAL(buf
, smb_rcls
));
36 /* if the client uses dos errors, but there is no error,
37 we should return no error here, otherwise it looks
38 like an unknown bad NT_STATUS. jmcd */
39 if (CVAL(buf
, smb_rcls
) == 0)
42 return NT_STATUS_DOS(CVAL(buf
, smb_rcls
), SVAL(buf
,smb_err
));
46 * Compatibility helper for the sync APIs: Fake NTSTATUS in cli->inbuf
47 * @param[in] cli The client connection that just received an error
48 * @param[in] status The error to set on "cli"
51 void cli_set_error(struct cli_state
*cli
, NTSTATUS status
)
53 uint32_t flags2
= SVAL(cli
->inbuf
, smb_flg2
);
55 if (NT_STATUS_IS_DOS(status
)) {
56 SSVAL(cli
->inbuf
, smb_flg2
,
57 flags2
& ~FLAGS2_32_BIT_ERROR_CODES
);
58 SCVAL(cli
->inbuf
, smb_rcls
, NT_STATUS_DOS_CLASS(status
));
59 SSVAL(cli
->inbuf
, smb_err
, NT_STATUS_DOS_CODE(status
));
63 SSVAL(cli
->inbuf
, smb_flg2
, flags2
| FLAGS2_32_BIT_ERROR_CODES
);
64 SIVAL(cli
->inbuf
, smb_rcls
, NT_STATUS_V(status
));
69 * @brief Find the smb_cmd offset of the last command pushed
70 * @param[in] buf The buffer we're building up
71 * @retval Where can we put our next andx cmd?
73 * While chaining requests, the "next" request we're looking at needs to put
74 * its SMB_Command before the data the previous request already built up added
75 * to the chain. Find the offset to the place where we have to put our cmd.
78 static bool find_andx_cmd_ofs(uint8_t *buf
, size_t *pofs
)
83 cmd
= CVAL(buf
, smb_com
);
85 SMB_ASSERT(is_andx_req(cmd
));
89 while (CVAL(buf
, ofs
) != 0xff) {
91 if (!is_andx_req(CVAL(buf
, ofs
))) {
96 * ofs is from start of smb header, so add the 4 length
97 * bytes. The next cmd is right after the wct field.
99 ofs
= SVAL(buf
, ofs
+2) + 4 + 1;
101 SMB_ASSERT(ofs
+4 < talloc_get_size(buf
));
109 * @brief Do the smb chaining at a buffer level
110 * @param[in] poutbuf Pointer to the talloc'ed buffer to be modified
111 * @param[in] smb_command The command that we want to issue
112 * @param[in] wct How many words?
113 * @param[in] vwv The words, already in network order
114 * @param[in] bytes_alignment How shall we align "bytes"?
115 * @param[in] num_bytes How many bytes?
116 * @param[in] bytes The data the request ships
118 * smb_splice_chain() adds the vwv and bytes to the request already present in
122 bool smb_splice_chain(uint8_t **poutbuf
, uint8_t smb_command
,
123 uint8_t wct
, const uint16_t *vwv
,
124 size_t bytes_alignment
,
125 uint32_t num_bytes
, const uint8_t *bytes
)
128 size_t old_size
, new_size
;
130 size_t chain_padding
= 0;
131 size_t bytes_padding
= 0;
134 old_size
= talloc_get_size(*poutbuf
);
137 * old_size == smb_wct means we're pushing the first request in for
141 first_request
= (old_size
== smb_wct
);
143 if (!first_request
&& ((old_size
% 4) != 0)) {
145 * Align the wct field of subsequent requests to a 4-byte
148 chain_padding
= 4 - (old_size
% 4);
152 * After the old request comes the new wct field (1 byte), the vwv's
153 * and the num_bytes field. After at we might need to align the bytes
154 * given to us to "bytes_alignment", increasing the num_bytes value.
157 new_size
= old_size
+ chain_padding
+ 1 + wct
* sizeof(uint16_t) + 2;
159 if ((bytes_alignment
!= 0) && ((new_size
% bytes_alignment
) != 0)) {
160 bytes_padding
= bytes_alignment
- (new_size
% bytes_alignment
);
163 new_size
+= bytes_padding
+ num_bytes
;
165 if ((smb_command
!= SMBwriteX
) && (new_size
> 0xffff)) {
166 DEBUG(1, ("splice_chain: %u bytes won't fit\n",
167 (unsigned)new_size
));
171 outbuf
= TALLOC_REALLOC_ARRAY(NULL
, *poutbuf
, uint8_t, new_size
);
172 if (outbuf
== NULL
) {
173 DEBUG(0, ("talloc failed\n"));
179 SCVAL(outbuf
, smb_com
, smb_command
);
183 if (!find_andx_cmd_ofs(outbuf
, &andx_cmd_ofs
)) {
184 DEBUG(1, ("invalid command chain\n"));
185 *poutbuf
= TALLOC_REALLOC_ARRAY(
186 NULL
, *poutbuf
, uint8_t, old_size
);
190 if (chain_padding
!= 0) {
191 memset(outbuf
+ old_size
, 0, chain_padding
);
192 old_size
+= chain_padding
;
195 SCVAL(outbuf
, andx_cmd_ofs
, smb_command
);
196 SSVAL(outbuf
, andx_cmd_ofs
+ 2, old_size
- 4);
202 * Push the chained request:
207 SCVAL(outbuf
, ofs
, wct
);
214 memcpy(outbuf
+ ofs
, vwv
, sizeof(uint16_t) * wct
);
215 ofs
+= sizeof(uint16_t) * wct
;
221 SSVAL(outbuf
, ofs
, num_bytes
+ bytes_padding
);
222 ofs
+= sizeof(uint16_t);
228 if (bytes_padding
!= 0) {
229 memset(outbuf
+ ofs
, 0, bytes_padding
);
230 ofs
+= bytes_padding
;
237 memcpy(outbuf
+ ofs
, bytes
, num_bytes
);
/**
 * Figure out if there is an andx command behind the current one
 * @param[in] buf The smb buffer to look at
 * @param[in] ofs The offset to the wct field that is followed by the cmd
 * @retval Is there a command following?
 */

static bool have_andx_command(const char *buf, uint16_t ofs)
{
	uint8_t wct;
	size_t buflen = talloc_get_size(buf);

	if ((ofs == buflen-1) || (ofs == buflen)) {
		return false;
	}

	wct = CVAL(buf, ofs);
	if (wct < 2) {
		/*
		 * Not enough space for the command and a following pointer
		 */
		return false;
	}
	/* 0xff in the andx cmd byte terminates the chain */
	return (CVAL(buf, ofs+1) != 0xff);
}
268 #define MAX_SMB_IOV 5
270 struct cli_smb_state
{
271 struct tevent_context
*ev
;
272 struct cli_state
*cli
;
273 uint8_t header
[smb_wct
+1]; /* Space for the header including the wct */
276 * For normal requests, cli_smb_req_send chooses a mid. Secondary
277 * trans requests need to use the mid of the primary request, so we
278 * need a place to store it. Assume it's set if != 0.
283 uint8_t bytecount_buf
[2];
285 struct iovec iov
[MAX_SMB_IOV
+3];
291 struct tevent_req
**chained_requests
;
294 static uint16_t cli_alloc_mid(struct cli_state
*cli
)
296 int num_pending
= talloc_array_length(cli
->pending
);
303 if ((result
== 0) || (result
== 0xffff)) {
307 for (i
=0; i
<num_pending
; i
++) {
308 if (result
== cli_smb_req_mid(cli
->pending
[i
])) {
313 if (i
== num_pending
) {
319 void cli_smb_req_unset_pending(struct tevent_req
*req
)
321 struct cli_smb_state
*state
= tevent_req_data(
322 req
, struct cli_smb_state
);
323 struct cli_state
*cli
= state
->cli
;
324 int num_pending
= talloc_array_length(cli
->pending
);
327 if (num_pending
== 1) {
329 * The pending read_smb tevent_req is a child of
330 * cli->pending. So if nothing is pending anymore, we need to
331 * delete the socket read fde.
333 TALLOC_FREE(cli
->pending
);
337 for (i
=0; i
<num_pending
; i
++) {
338 if (req
== cli
->pending
[i
]) {
342 if (i
== num_pending
) {
344 * Something's seriously broken. Just returning here is the
345 * right thing nevertheless, the point of this routine is to
346 * remove ourselves from cli->pending.
352 * Remove ourselves from the cli->pending array
354 if (num_pending
> 1) {
355 cli
->pending
[i
] = cli
->pending
[num_pending
-1];
359 * No NULL check here, we're shrinking by sizeof(void *), and
360 * talloc_realloc just adjusts the size for this.
362 cli
->pending
= talloc_realloc(NULL
, cli
->pending
, struct tevent_req
*,
/* talloc destructor: make sure a dying request leaves cli->pending */
static int cli_smb_req_destructor(struct tevent_req *req)
{
	cli_smb_req_unset_pending(req);
	return 0;
}

static void cli_smb_received(struct tevent_req *subreq);
375 bool cli_smb_req_set_pending(struct tevent_req
*req
)
377 struct cli_smb_state
*state
= tevent_req_data(
378 req
, struct cli_smb_state
);
379 struct cli_state
*cli
;
380 struct tevent_req
**pending
;
382 struct tevent_req
*subreq
;
385 num_pending
= talloc_array_length(cli
->pending
);
387 pending
= talloc_realloc(cli
, cli
->pending
, struct tevent_req
*,
389 if (pending
== NULL
) {
392 pending
[num_pending
] = req
;
393 cli
->pending
= pending
;
394 talloc_set_destructor(req
, cli_smb_req_destructor
);
396 if (num_pending
> 0) {
401 * We're the first ones, add the read_smb request that waits for the
402 * answer from the server
404 subreq
= read_smb_send(cli
->pending
, state
->ev
, cli
->fd
);
405 if (subreq
== NULL
) {
406 cli_smb_req_unset_pending(req
);
409 tevent_req_set_callback(subreq
, cli_smb_received
, cli
);
414 * Fetch a smb request's mid. Only valid after the request has been sent by
415 * cli_smb_req_send().
417 uint16_t cli_smb_req_mid(struct tevent_req
*req
)
419 struct cli_smb_state
*state
= tevent_req_data(
420 req
, struct cli_smb_state
);
421 return SVAL(state
->header
, smb_mid
);
424 void cli_smb_req_set_mid(struct tevent_req
*req
, uint16_t mid
)
426 struct cli_smb_state
*state
= tevent_req_data(
427 req
, struct cli_smb_state
);
/* Sum of the lengths of "count" iovec entries */
static size_t iov_len(const struct iovec *iov, int count)
{
	size_t result = 0;
	int i;

	for (i=0; i<count; i++) {
		result += iov[i].iov_len;
	}
	return result;
}
441 static uint8_t *iov_concat(TALLOC_CTX
*mem_ctx
, const struct iovec
*iov
,
444 size_t len
= iov_len(iov
, count
);
449 buf
= talloc_array(mem_ctx
, uint8_t, len
);
454 for (i
=0; i
<count
; i
++) {
455 memcpy(buf
+copied
, iov
[i
].iov_base
, iov
[i
].iov_len
);
456 copied
+= iov
[i
].iov_len
;
461 struct tevent_req
*cli_smb_req_create(TALLOC_CTX
*mem_ctx
,
462 struct event_context
*ev
,
463 struct cli_state
*cli
,
465 uint8_t additional_flags
,
466 uint8_t wct
, uint16_t *vwv
,
468 struct iovec
*bytes_iov
)
470 struct tevent_req
*result
;
471 struct cli_smb_state
*state
;
473 if (iov_count
> MAX_SMB_IOV
) {
475 * Should not happen :-)
480 result
= tevent_req_create(mem_ctx
, &state
, struct cli_smb_state
);
481 if (result
== NULL
) {
486 state
->mid
= 0; /* Set to auto-choose in cli_smb_req_send */
487 state
->chain_num
= 0;
488 state
->chained_requests
= NULL
;
490 cli_setup_packet_buf(cli
, (char *)state
->header
);
491 SCVAL(state
->header
, smb_com
, smb_command
);
492 SSVAL(state
->header
, smb_tid
, cli
->cnum
);
493 SCVAL(state
->header
, smb_wct
, wct
);
497 SSVAL(state
->bytecount_buf
, 0, iov_len(bytes_iov
, iov_count
));
499 state
->iov
[0].iov_base
= state
->header
;
500 state
->iov
[0].iov_len
= sizeof(state
->header
);
501 state
->iov
[1].iov_base
= state
->vwv
;
502 state
->iov
[1].iov_len
= wct
* sizeof(uint16_t);
503 state
->iov
[2].iov_base
= state
->bytecount_buf
;
504 state
->iov
[2].iov_len
= sizeof(uint16_t);
506 if (iov_count
!= 0) {
507 memcpy(&state
->iov
[3], bytes_iov
,
508 iov_count
* sizeof(*bytes_iov
));
510 state
->iov_count
= iov_count
+ 3;
515 static bool cli_signv(struct cli_state
*cli
, struct iovec
*iov
, int count
,
521 * Obvious optimization: Make cli_calculate_sign_mac work with struct
522 * iovec directly. MD5Update would do that just fine.
525 if ((count
<= 0) || (iov
[0].iov_len
< smb_wct
)) {
529 buf
= iov_concat(talloc_tos(), iov
, count
);
534 cli_calculate_sign_mac(cli
, (char *)buf
, seqnum
);
535 memcpy(iov
[0].iov_base
, buf
, iov
[0].iov_len
);
541 static void cli_smb_sent(struct tevent_req
*subreq
);
543 static bool cli_smb_req_iov_send(struct tevent_req
*req
,
544 struct cli_smb_state
*state
,
545 struct iovec
*iov
, int iov_count
)
547 struct tevent_req
*subreq
;
549 if (iov
[0].iov_len
< smb_wct
) {
553 if (state
->mid
!= 0) {
554 SSVAL(iov
[0].iov_base
, smb_mid
, state
->mid
);
556 SSVAL(iov
[0].iov_base
, smb_mid
, cli_alloc_mid(state
->cli
));
559 smb_setlen((char *)iov
[0].iov_base
, iov_len(iov
, iov_count
) - 4);
561 if (!cli_signv(state
->cli
, iov
, iov_count
, &state
->seqnum
)) {
565 if (cli_encryption_on(state
->cli
)) {
569 buf
= (char *)iov_concat(talloc_tos(), iov
, iov_count
);
573 status
= cli_encrypt_message(state
->cli
, (char *)buf
,
576 if (!NT_STATUS_IS_OK(status
)) {
577 DEBUG(0, ("Error in encrypting client message: %s\n",
581 buf
= (char *)talloc_memdup(state
, enc_buf
,
587 iov
[0].iov_base
= buf
;
588 iov
[0].iov_len
= talloc_get_size(buf
);
589 subreq
= writev_send(state
, state
->ev
, state
->cli
->outgoing
,
590 state
->cli
->fd
, iov
, 1);
592 subreq
= writev_send(state
, state
->ev
, state
->cli
->outgoing
,
593 state
->cli
->fd
, iov
, iov_count
);
595 if (subreq
== NULL
) {
598 tevent_req_set_callback(subreq
, cli_smb_sent
, req
);
602 bool cli_smb_req_send(struct tevent_req
*req
)
604 struct cli_smb_state
*state
= tevent_req_data(
605 req
, struct cli_smb_state
);
607 return cli_smb_req_iov_send(req
, state
, state
->iov
, state
->iov_count
);
610 struct tevent_req
*cli_smb_send(TALLOC_CTX
*mem_ctx
,
611 struct event_context
*ev
,
612 struct cli_state
*cli
,
614 uint8_t additional_flags
,
615 uint8_t wct
, uint16_t *vwv
,
617 const uint8_t *bytes
)
619 struct tevent_req
*req
;
622 iov
.iov_base
= CONST_DISCARD(char *, bytes
);
623 iov
.iov_len
= num_bytes
;
625 req
= cli_smb_req_create(mem_ctx
, ev
, cli
, smb_command
,
626 additional_flags
, wct
, vwv
, 1, &iov
);
630 if (!cli_smb_req_send(req
)) {
636 static void cli_smb_sent(struct tevent_req
*subreq
)
638 struct tevent_req
*req
= tevent_req_callback_data(
639 subreq
, struct tevent_req
);
640 struct cli_smb_state
*state
= tevent_req_data(
641 req
, struct cli_smb_state
);
645 nwritten
= writev_recv(subreq
, &err
);
647 if (nwritten
== -1) {
648 tevent_req_nterror(req
, map_nt_error_from_unix(err
));
652 switch (CVAL(state
->header
, smb_com
)) {
658 tevent_req_done(req
);
661 if ((CVAL(state
->header
, smb_wct
) == 8) &&
662 (CVAL(state
->vwv
+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE
)) {
664 tevent_req_done(req
);
669 if (!cli_smb_req_set_pending(req
)) {
670 tevent_req_nterror(req
, NT_STATUS_NO_MEMORY
);
675 static void cli_smb_received(struct tevent_req
*subreq
)
677 struct cli_state
*cli
= tevent_req_callback_data(
678 subreq
, struct cli_state
);
679 struct tevent_req
*req
;
680 struct cli_smb_state
*state
;
681 struct tevent_context
*ev
;
689 received
= read_smb_recv(subreq
, talloc_tos(), &inbuf
, &err
);
691 if (received
== -1) {
692 status
= map_nt_error_from_unix(err
);
696 if ((IVAL(inbuf
, 4) != 0x424d53ff) /* 0xFF"SMB" */
697 && (SVAL(inbuf
, 4) != 0x45ff)) /* 0xFF"E" */ {
698 DEBUG(10, ("Got non-SMB PDU\n"));
699 status
= NT_STATUS_INVALID_NETWORK_RESPONSE
;
703 if (cli_encryption_on(cli
) && (CVAL(inbuf
, 0) == 0)) {
704 uint16_t enc_ctx_num
;
706 status
= get_enc_ctx_num(inbuf
, &enc_ctx_num
);
707 if (!NT_STATUS_IS_OK(status
)) {
708 DEBUG(10, ("get_enc_ctx_num returned %s\n",
713 if (enc_ctx_num
!= cli
->trans_enc_state
->enc_ctx_num
) {
714 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
716 cli
->trans_enc_state
->enc_ctx_num
));
717 status
= NT_STATUS_INVALID_HANDLE
;
721 status
= common_decrypt_buffer(cli
->trans_enc_state
,
723 if (!NT_STATUS_IS_OK(status
)) {
724 DEBUG(10, ("common_decrypt_buffer returned %s\n",
730 mid
= SVAL(inbuf
, smb_mid
);
731 num_pending
= talloc_array_length(cli
->pending
);
733 for (i
=0; i
<num_pending
; i
++) {
734 if (mid
== cli_smb_req_mid(cli
->pending
[i
])) {
738 if (i
== num_pending
) {
739 /* Dump unexpected reply */
744 req
= cli
->pending
[i
];
745 state
= tevent_req_data(req
, struct cli_smb_state
);
748 if (!cli_check_sign_mac(cli
, (char *)inbuf
, state
->seqnum
+1)) {
749 DEBUG(10, ("cli_check_sign_mac failed\n"));
751 status
= NT_STATUS_ACCESS_DENIED
;
755 if (state
->chained_requests
== NULL
) {
756 state
->inbuf
= talloc_move(state
, &inbuf
);
757 talloc_set_destructor(req
, NULL
);
758 cli_smb_req_destructor(req
);
759 tevent_req_done(req
);
761 struct tevent_req
**chain
= talloc_move(
762 talloc_tos(), &state
->chained_requests
);
763 int num_chained
= talloc_array_length(chain
);
765 for (i
=0; i
<num_chained
; i
++) {
766 state
= tevent_req_data(chain
[i
], struct
768 state
->inbuf
= inbuf
;
769 state
->chain_num
= i
;
770 tevent_req_done(chain
[i
]);
776 if (talloc_array_length(cli
->pending
) > 0) {
778 * Set up another read request for the other pending cli_smb
781 state
= tevent_req_data(cli
->pending
[0], struct cli_smb_state
);
782 subreq
= read_smb_send(cli
->pending
, state
->ev
, cli
->fd
);
783 if (subreq
== NULL
) {
784 status
= NT_STATUS_NO_MEMORY
;
787 tevent_req_set_callback(subreq
, cli_smb_received
, cli
);
792 * Cancel all pending requests. We don't do a for-loop walking
793 * cli->pending because that array changes in
794 * cli_smb_req_destructor().
796 while (talloc_array_length(cli
->pending
) > 0) {
797 req
= cli
->pending
[0];
798 talloc_set_destructor(req
, NULL
);
799 cli_smb_req_destructor(req
);
800 tevent_req_nterror(req
, status
);
804 NTSTATUS
cli_smb_recv(struct tevent_req
*req
, uint8_t min_wct
,
805 uint8_t *pwct
, uint16_t **pvwv
,
806 uint32_t *pnum_bytes
, uint8_t **pbytes
)
808 struct cli_smb_state
*state
= tevent_req_data(
809 req
, struct cli_smb_state
);
810 NTSTATUS status
= NT_STATUS_OK
;
813 size_t wct_ofs
, bytes_offset
;
816 if (tevent_req_is_nterror(req
, &status
)) {
820 if (state
->inbuf
== NULL
) {
821 /* This was a request without a reply */
826 cmd
= CVAL(state
->inbuf
, smb_com
);
828 for (i
=0; i
<state
->chain_num
; i
++) {
829 if (i
< state
->chain_num
-1) {
831 return NT_STATUS_REQUEST_ABORTED
;
833 if (!is_andx_req(cmd
)) {
834 return NT_STATUS_INVALID_NETWORK_RESPONSE
;
838 if (!have_andx_command((char *)state
->inbuf
, wct_ofs
)) {
840 * This request was not completed because a previous
841 * request in the chain had received an error.
843 return NT_STATUS_REQUEST_ABORTED
;
846 wct_ofs
= SVAL(state
->inbuf
, wct_ofs
+ 3);
849 * Skip the all-present length field. No overflow, we've just
850 * put a 16-bit value into a size_t.
854 if (wct_ofs
+2 > talloc_get_size(state
->inbuf
)) {
855 return NT_STATUS_INVALID_NETWORK_RESPONSE
;
858 cmd
= CVAL(state
->inbuf
, wct_ofs
+ 1);
861 status
= cli_pull_error((char *)state
->inbuf
);
863 if (!have_andx_command((char *)state
->inbuf
, wct_ofs
)
864 && NT_STATUS_IS_ERR(status
)) {
866 * The last command takes the error code. All further commands
867 * down the requested chain will get a
868 * NT_STATUS_REQUEST_ABORTED.
873 wct
= CVAL(state
->inbuf
, wct_ofs
);
874 bytes_offset
= wct_ofs
+ 1 + wct
* sizeof(uint16_t);
875 num_bytes
= SVAL(state
->inbuf
, bytes_offset
);
878 return NT_STATUS_INVALID_NETWORK_RESPONSE
;
882 * wct_ofs is a 16-bit value plus 4, wct is a 8-bit value, num_bytes
883 * is a 16-bit value. So bytes_offset being size_t should be far from
886 if ((bytes_offset
+ 2 > talloc_get_size(state
->inbuf
))
887 || (bytes_offset
> 0xffff)) {
888 return NT_STATUS_INVALID_NETWORK_RESPONSE
;
895 *pvwv
= (uint16_t *)(state
->inbuf
+ wct_ofs
+ 1);
897 if (pnum_bytes
!= NULL
) {
898 *pnum_bytes
= num_bytes
;
900 if (pbytes
!= NULL
) {
901 *pbytes
= (uint8_t *)state
->inbuf
+ bytes_offset
+ 2;
907 size_t cli_smb_wct_ofs(struct tevent_req
**reqs
, int num_reqs
)
912 wct_ofs
= smb_wct
- 4;
914 for (i
=0; i
<num_reqs
; i
++) {
915 struct cli_smb_state
*state
;
916 state
= tevent_req_data(reqs
[i
], struct cli_smb_state
);
917 wct_ofs
+= iov_len(state
->iov
+1, state
->iov_count
-1);
918 wct_ofs
= (wct_ofs
+ 3) & ~3;
923 bool cli_smb_chain_send(struct tevent_req
**reqs
, int num_reqs
)
925 struct cli_smb_state
*first_state
= tevent_req_data(
926 reqs
[0], struct cli_smb_state
);
927 struct cli_smb_state
*last_state
= tevent_req_data(
928 reqs
[num_reqs
-1], struct cli_smb_state
);
929 struct cli_smb_state
*state
;
931 size_t chain_padding
= 0;
933 struct iovec
*iov
= NULL
;
934 struct iovec
*this_iov
;
937 for (i
=0; i
<num_reqs
; i
++) {
938 state
= tevent_req_data(reqs
[i
], struct cli_smb_state
);
939 iovlen
+= state
->iov_count
;
942 iov
= talloc_array(last_state
, struct iovec
, iovlen
);
947 first_state
->chained_requests
= (struct tevent_req
**)talloc_memdup(
948 last_state
, reqs
, sizeof(*reqs
) * num_reqs
);
949 if (first_state
->chained_requests
== NULL
) {
953 wct_offset
= smb_wct
- 4;
956 for (i
=0; i
<num_reqs
; i
++) {
957 size_t next_padding
= 0;
960 state
= tevent_req_data(reqs
[i
], struct cli_smb_state
);
962 if (i
< num_reqs
-1) {
963 if (!is_andx_req(CVAL(state
->header
, smb_com
))
964 || CVAL(state
->header
, smb_wct
) < 2) {
969 wct_offset
+= iov_len(state
->iov
+1, state
->iov_count
-1) + 1;
970 if ((wct_offset
% 4) != 0) {
971 next_padding
= 4 - (wct_offset
% 4);
973 wct_offset
+= next_padding
;
976 if (i
< num_reqs
-1) {
977 struct cli_smb_state
*next_state
= tevent_req_data(
978 reqs
[i
+1], struct cli_smb_state
);
979 SCVAL(vwv
+0, 0, CVAL(next_state
->header
, smb_com
));
981 SSVAL(vwv
+1, 0, wct_offset
);
982 } else if (is_andx_req(CVAL(state
->header
, smb_com
))) {
983 /* properly end the chain */
984 SCVAL(vwv
+0, 0, 0xff);
985 SCVAL(vwv
+0, 1, 0xff);
990 this_iov
[0] = state
->iov
[0];
993 * This one is a bit subtle. We have to add
994 * chain_padding bytes between the requests, and we
995 * have to also include the wct field of the
996 * subsequent requests. We use the subsequent header
997 * for the padding, it contains the wct field in its
1000 this_iov
[0].iov_len
= chain_padding
+1;
1001 this_iov
[0].iov_base
= &state
->header
[
1002 sizeof(state
->header
) - this_iov
[0].iov_len
];
1003 memset(this_iov
[0].iov_base
, 0, this_iov
[0].iov_len
-1);
1005 memcpy(this_iov
+1, state
->iov
+1,
1006 sizeof(struct iovec
) * (state
->iov_count
-1));
1007 this_iov
+= state
->iov_count
;
1008 chain_padding
= next_padding
;
1011 if (!cli_smb_req_iov_send(reqs
[0], last_state
, iov
, iovlen
)) {
1020 uint8_t *cli_smb_inbuf(struct tevent_req
*req
)
1022 struct cli_smb_state
*state
= tevent_req_data(
1023 req
, struct cli_smb_state
);
1024 return state
->inbuf
;
1027 bool cli_has_async_calls(struct cli_state
*cli
)
1029 return ((tevent_queue_length(cli
->outgoing
) != 0)
1030 || (talloc_array_length(cli
->pending
) != 0));