/*
   Unix SMB/CIFS implementation.
   Infrastructure for async SMB client requests
   Copyright (C) Volker Lendecke 2008

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/async_req/async_sock.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/util/tevent_unix.h"
#include "async_smb.h"
#include "smb_crypt.h"
#include "libsmb/nmblib.h"
static NTSTATUS cli_pull_raw_error(const uint8_t *buf)
{
        uint32_t flags2 = SVAL(buf, smb_flg2);
        NTSTATUS status = NT_STATUS(IVAL(buf, smb_rcls));

        if (NT_STATUS_IS_OK(status)) {
                return NT_STATUS_OK;
        }

        if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
                return status;
        }

        return NT_STATUS_DOS(CVAL(buf, smb_rcls), SVAL(buf, smb_err));
}
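/*
 * Worked example (added note, not from the original file): with
 * FLAGS2_32_BIT_ERROR_CODES set in flags2, the four bytes at smb_rcls
 * carry a raw 32-bit NT status, e.g. 0xC0000022 (NT_STATUS_ACCESS_DENIED).
 * Without that flag the same bytes hold a DOS error class/code pair such
 * as ERRDOS/ERRbadfile, which cli_pull_raw_error() wraps via
 * NT_STATUS_DOS() so that callers can still tell the two encodings apart.
 */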
/**
 * Figure out if there is an andx command behind the current one
 * @param[in] buf	The smb buffer to look at
 * @param[in] ofs	The offset to the wct field that is followed by the cmd
 * @retval Is there a command following?
 */

static bool have_andx_command(const char *buf, uint16_t ofs)
{
        uint8_t wct;
        size_t buflen = talloc_get_size(buf);

        if ((ofs == buflen-1) || (ofs == buflen)) {
                return false;
        }

        wct = CVAL(buf, ofs);
        if (wct < 2) {
                /*
                 * Not enough space for the command and a following pointer
                 */
                return false;
        }
        return (CVAL(buf, ofs+1) != 0xff);
}
struct cli_smb_state {
        struct tevent_context *ev;
        struct cli_state *cli;
        uint8_t header[smb_wct+1]; /* Space for the header including the wct */

        /*
         * For normal requests, cli_smb_req_send chooses a mid. Secondary
         * trans requests need to use the mid of the primary request, so we
         * need a place to store it. Assume it's set if != 0.
         */
        uint16_t mid;

        uint16_t *vwv;
        uint8_t bytecount_buf[2];

        struct iovec iov[MAX_SMB_IOV+3];
        int iov_count;

        uint8_t *inbuf;
        uint32_t seqnum;
        int chain_num;
        int chain_length;
        struct tevent_req **chained_requests;

        bool one_way;
};
static uint16_t cli_alloc_mid(struct cli_state *cli)
{
        int num_pending = talloc_array_length(cli->conn.pending);
        uint16_t result;

        while (true) {
                int i;

                result = cli->smb1.mid++;
                if ((result == 0) || (result == 0xffff)) {
                        continue;
                }

                for (i=0; i<num_pending; i++) {
                        if (result == cli_smb_req_mid(cli->conn.pending[i])) {
                                break;
                        }
                }

                if (i == num_pending) {
                        return result;
                }
        }
}
void cli_smb_req_unset_pending(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        struct cli_state *cli = state->cli;
        int num_pending = talloc_array_length(cli->conn.pending);
        int i;

        if (state->mid != 0) {
                /*
                 * This is a [nt]trans[2] request which waits
                 * for more than one reply.
                 */
                return;
        }

        talloc_set_destructor(req, NULL);

        if (num_pending == 1) {
                /*
                 * The pending read_smb tevent_req is a child of
                 * cli->pending. So if nothing is pending anymore, we need to
                 * delete the socket read fde.
                 */
                TALLOC_FREE(cli->conn.pending);
                cli->conn.read_smb_req = NULL;
                return;
        }

        for (i=0; i<num_pending; i++) {
                if (req == cli->conn.pending[i]) {
                        break;
                }
        }
        if (i == num_pending) {
                /*
                 * Something's seriously broken. Just returning here is the
                 * right thing nevertheless, the point of this routine is to
                 * remove ourselves from cli->conn.pending.
                 */
                return;
        }

        /*
         * Remove ourselves from the cli->conn.pending array
         */
        for (; i < (num_pending - 1); i++) {
                cli->conn.pending[i] = cli->conn.pending[i+1];
        }

        /*
         * No NULL check here, we're shrinking by sizeof(void *), and
         * talloc_realloc just adjusts the size for this.
         */
        cli->conn.pending = talloc_realloc(NULL, cli->conn.pending,
                                           struct tevent_req *,
                                           num_pending - 1);
        return;
}
static int cli_smb_req_destructor(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);

        /*
         * Make sure we really remove it from
         * the pending array on destruction.
         */
        state->mid = 0;
        cli_smb_req_unset_pending(req);
        return 0;
}
static bool cli_state_receive_next(struct cli_state *cli);
static void cli_state_notify_pending(struct cli_state *cli, NTSTATUS status);
bool cli_smb_req_set_pending(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        struct cli_state *cli;
        struct tevent_req **pending;
        int num_pending;

        cli = state->cli;
        num_pending = talloc_array_length(cli->conn.pending);

        pending = talloc_realloc(cli, cli->conn.pending, struct tevent_req *,
                                 num_pending+1);
        if (pending == NULL) {
                return false;
        }
        pending[num_pending] = req;
        cli->conn.pending = pending;
        talloc_set_destructor(req, cli_smb_req_destructor);

        if (!cli_state_receive_next(cli)) {
                /*
                 * the caller should notify the current request
                 *
                 * And all other pending requests get notified
                 * by cli_state_notify_pending().
                 */
                cli_smb_req_unset_pending(req);
                cli_state_notify_pending(cli, NT_STATUS_NO_MEMORY);
                return false;
        }

        return true;
}
static void cli_smb_received(struct tevent_req *subreq);
static NTSTATUS cli_state_dispatch_smb1(struct cli_state *cli,
					TALLOC_CTX *frame,
					uint8_t *inbuf);
static bool cli_state_receive_next(struct cli_state *cli)
{
        size_t num_pending = talloc_array_length(cli->conn.pending);
        struct tevent_req *req;
        struct cli_smb_state *state;

        if (cli->conn.read_smb_req != NULL) {
                return true;
        }

        if (num_pending == 0) {
                return true;
        }

        req = cli->conn.pending[0];
        state = tevent_req_data(req, struct cli_smb_state);

        cli->conn.dispatch_incoming = cli_state_dispatch_smb1;

        /*
         * We're the first ones, add the read_smb request that waits for the
         * answer from the server
         */
        cli->conn.read_smb_req = read_smb_send(cli->conn.pending, state->ev,
                                               cli->conn.fd);
        if (cli->conn.read_smb_req == NULL) {
                return false;
        }
        tevent_req_set_callback(cli->conn.read_smb_req, cli_smb_received, cli);
        return true;
}
static void cli_state_notify_pending(struct cli_state *cli, NTSTATUS status)
{
        cli_state_disconnect(cli);

        /*
         * Cancel all pending requests. We do not do a for-loop walking
         * cli->conn.pending because that array changes in
         * cli_smb_req_destructor().
         */
        while (talloc_array_length(cli->conn.pending) > 0) {
                struct tevent_req *req;
                struct cli_smb_state *state;

                req = cli->conn.pending[0];
                state = tevent_req_data(req, struct cli_smb_state);

                cli_smb_req_unset_pending(req);

                /*
                 * we need to defer the callback, because we may notify more
                 * than one caller.
                 */
                tevent_req_defer_callback(req, state->ev);
                tevent_req_nterror(req, status);
        }
}
/*
 * Fetch a smb request's mid. Only valid after the request has been sent by
 * cli_smb_req_send().
 */
uint16_t cli_smb_req_mid(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);

        if (state->mid != 0) {
                return state->mid;
        }

        return SVAL(state->header, smb_mid);
}
void cli_smb_req_set_mid(struct tevent_req *req, uint16_t mid)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        state->mid = mid;
}
uint32_t cli_smb_req_seqnum(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        return state->seqnum;
}
void cli_smb_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        state->seqnum = seqnum;
}
static size_t iov_len(const struct iovec *iov, int count)
{
        size_t result = 0;
        int i;

        for (i=0; i<count; i++) {
                result += iov[i].iov_len;
        }
        return result;
}
static uint8_t *iov_concat(TALLOC_CTX *mem_ctx, const struct iovec *iov,
                           int count)
{
        size_t len = iov_len(iov, count);
        size_t copied;
        uint8_t *buf;
        int i;

        buf = talloc_array(mem_ctx, uint8_t, len);
        if (buf == NULL) {
                return NULL;
        }

        copied = 0;
        for (i=0; i<count; i++) {
                memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
                copied += iov[i].iov_len;
        }
        return buf;
}
struct tevent_req *cli_smb_req_create(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli,
				      uint8_t smb_command,
				      uint8_t additional_flags,
				      uint8_t wct, uint16_t *vwv,
				      int iov_count,
				      struct iovec *bytes_iov)
{
        struct tevent_req *result;
        struct cli_smb_state *state;
        struct timeval endtime;

        if (iov_count > MAX_SMB_IOV) {
                /*
                 * Should not happen :-)
                 */
                return NULL;
        }

        result = tevent_req_create(mem_ctx, &state, struct cli_smb_state);
        if (result == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->mid = 0;		/* Set to auto-choose in cli_smb_req_send */
        state->chain_num = 0;
        state->chained_requests = NULL;

        cli_setup_packet_buf(cli, (char *)state->header);
        SCVAL(state->header, smb_com, smb_command);
        SSVAL(state->header, smb_tid, cli->smb1.tid);
        SCVAL(state->header, smb_wct, wct);

        state->vwv = vwv;

        SSVAL(state->bytecount_buf, 0, iov_len(bytes_iov, iov_count));

        state->iov[0].iov_base = (void *)state->header;
        state->iov[0].iov_len  = sizeof(state->header);
        state->iov[1].iov_base = (void *)state->vwv;
        state->iov[1].iov_len  = wct * sizeof(uint16_t);
        state->iov[2].iov_base = (void *)state->bytecount_buf;
        state->iov[2].iov_len  = sizeof(uint16_t);

        if (iov_count != 0) {
                memcpy(&state->iov[3], bytes_iov,
                       iov_count * sizeof(*bytes_iov));
        }
        state->iov_count = iov_count + 3;

        endtime = timeval_current_ofs_msec(cli->timeout);
        if (!tevent_req_set_endtime(result, ev, endtime)) {
                tevent_req_oom(result);
                return tevent_req_post(result, ev);
        }

        switch (smb_command) {
        case SMBtranss:
        case SMBtranss2:
        case SMBnttranss:
        case SMBntcancel:
                state->one_way = true;
                break;
        case SMBlockingX:
                if ((wct == 8) &&
                    (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
                        state->one_way = true;
                }
                break;
        }

        return result;
}
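/*
 * Usage sketch (illustrative only, not part of the original file): build a
 * request whose data section is scattered over two caller-owned buffers, so
 * the bytes can be sent without an extra copy. "my_ev", "my_cli", "vwv",
 * "hdr" and "payload" are hypothetical caller-side variables.
 *
 *	struct iovec bytes[2];
 *	struct tevent_req *req;
 *
 *	bytes[0].iov_base = hdr;     bytes[0].iov_len = hdr_len;
 *	bytes[1].iov_base = payload; bytes[1].iov_len = payload_len;
 *
 *	req = cli_smb_req_create(talloc_tos(), my_ev, my_cli, SMBwriteX,
 *				 0, 12, vwv, 2, bytes);
 *	if (req == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	status = cli_smb_req_send(req);
 */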
static NTSTATUS cli_signv(struct cli_state *cli, struct iovec *iov, int count,
			  uint32_t *seqnum)
{
        uint8_t *buf;

        /*
         * Obvious optimization: Make cli_calculate_sign_mac work with struct
         * iovec directly. MD5Update would do that just fine.
         */

        if ((count <= 0) || (iov[0].iov_len < smb_wct)) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        buf = iov_concat(talloc_tos(), iov, count);
        if (buf == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        cli_calculate_sign_mac(cli, (char *)buf, seqnum);
        memcpy(iov[0].iov_base, buf, iov[0].iov_len);

        TALLOC_FREE(buf);
        return NT_STATUS_OK;
}
static void cli_smb_sent(struct tevent_req *subreq);
static NTSTATUS cli_smb_req_iov_send(struct tevent_req *req,
				     struct cli_smb_state *state,
				     struct iovec *iov, int iov_count)
{
        struct tevent_req *subreq;
        NTSTATUS status;

        if (!cli_state_is_connected(state->cli)) {
                return NT_STATUS_CONNECTION_DISCONNECTED;
        }

        if (iov[0].iov_len < smb_wct) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        if (state->mid != 0) {
                SSVAL(iov[0].iov_base, smb_mid, state->mid);
        } else {
                uint16_t mid = cli_alloc_mid(state->cli);
                SSVAL(iov[0].iov_base, smb_mid, mid);
        }

        smb_setlen((char *)iov[0].iov_base, iov_len(iov, iov_count) - 4);

        status = cli_signv(state->cli, iov, iov_count, &state->seqnum);

        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        if (cli_encryption_on(state->cli)) {
                char *buf, *enc_buf;

                buf = (char *)iov_concat(talloc_tos(), iov, iov_count);
                if (buf == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
                status = common_encrypt_buffer(state->cli->trans_enc_state,
                                               (char *)buf, &enc_buf);
                TALLOC_FREE(buf);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(0, ("Error in encrypting client message: %s\n",
                                  nt_errstr(status)));
                        return status;
                }
                buf = (char *)talloc_memdup(state, enc_buf,
                                            smb_len(enc_buf)+4);
                SAFE_FREE(enc_buf);
                if (buf == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
                iov[0].iov_base = (void *)buf;
                iov[0].iov_len = talloc_get_size(buf);
                iov_count = 1;
        }

        subreq = writev_send(state, state->ev, state->cli->conn.outgoing,
                             state->cli->conn.fd, false, iov, iov_count);
        if (subreq == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        tevent_req_set_callback(subreq, cli_smb_sent, req);
        return NT_STATUS_OK;
}
NTSTATUS cli_smb_req_send(struct tevent_req *req)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);

        return cli_smb_req_iov_send(req, state, state->iov, state->iov_count);
}
struct tevent_req *cli_smb_send(TALLOC_CTX *mem_ctx,
				struct event_context *ev,
				struct cli_state *cli,
				uint8_t smb_command,
				uint8_t additional_flags,
				uint8_t wct, uint16_t *vwv,
				uint32_t num_bytes,
				const uint8_t *bytes)
{
        struct tevent_req *req;
        struct iovec iov;
        NTSTATUS status;

        iov.iov_base = discard_const_p(void, bytes);
        iov.iov_len = num_bytes;

        req = cli_smb_req_create(mem_ctx, ev, cli, smb_command,
                                 additional_flags, wct, vwv, 1, &iov);
        if (req == NULL) {
                return NULL;
        }
        status = cli_smb_req_send(req);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return tevent_req_post(req, ev);
        }
        return req;
}
static void cli_smb_sent(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        ssize_t nwritten;
        int err;

        nwritten = writev_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (nwritten == -1) {
                NTSTATUS status = map_nt_error_from_unix(err);
                cli_state_notify_pending(state->cli, status);
                return;
        }

        if (state->one_way) {
                state->inbuf = NULL;
                tevent_req_done(req);
                return;
        }

        if (!cli_smb_req_set_pending(req)) {
                tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
                return;
        }
}
static void cli_smb_received(struct tevent_req *subreq)
{
        struct cli_state *cli = tevent_req_callback_data(
                subreq, struct cli_state);
        TALLOC_CTX *frame = talloc_stackframe();
        NTSTATUS status;
        uint8_t *inbuf;
        ssize_t received;
        int err;

        if (subreq != cli->conn.read_smb_req) {
                DEBUG(1, ("Internal error: cli_smb_received called with "
                          "unexpected subreq\n"));
                status = NT_STATUS_INTERNAL_ERROR;
                cli_state_notify_pending(cli, status);
                TALLOC_FREE(frame);
                return;
        }

        received = read_smb_recv(subreq, frame, &inbuf, &err);
        TALLOC_FREE(subreq);
        cli->conn.read_smb_req = NULL;
        if (received == -1) {
                status = map_nt_error_from_unix(err);
                cli_state_notify_pending(cli, status);
                TALLOC_FREE(frame);
                return;
        }

        status = cli->conn.dispatch_incoming(cli, frame, inbuf);
        TALLOC_FREE(frame);
        if (NT_STATUS_IS_OK(status)) {
                /*
                 * We should not do any more processing
                 * as the dispatch function called
                 * tevent_req_done().
                 */
                return;
        } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
                /*
                 * We got an error, so notify all pending requests
                 */
                cli_state_notify_pending(cli, status);
                return;
        }

        /*
         * We got NT_STATUS_RETRY, so we may ask for a
         * next incoming pdu.
         */
        if (!cli_state_receive_next(cli)) {
                cli_state_notify_pending(cli, NT_STATUS_NO_MEMORY);
        }
}
static NTSTATUS cli_state_dispatch_smb1(struct cli_state *cli,
					TALLOC_CTX *frame,
					uint8_t *inbuf)
{
        struct tevent_req *req;
        struct cli_smb_state *state;
        NTSTATUS status;
        size_t num_pending;
        size_t i;
        uint16_t mid;
        bool oplock_break;

        if ((IVAL(inbuf, 4) != 0x424d53ff) /* 0xFF"SMB" */
            && (SVAL(inbuf, 4) != 0x45ff)) /* 0xFF"E" */ {
                DEBUG(10, ("Got non-SMB PDU\n"));
                return NT_STATUS_INVALID_NETWORK_RESPONSE;
        }

        if (cli_encryption_on(cli) && (CVAL(inbuf, 0) == 0)) {
                uint16_t enc_ctx_num;

                status = get_enc_ctx_num(inbuf, &enc_ctx_num);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(10, ("get_enc_ctx_num returned %s\n",
                                   nt_errstr(status)));
                        return status;
                }

                if (enc_ctx_num != cli->trans_enc_state->enc_ctx_num) {
                        DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
                                   enc_ctx_num,
                                   cli->trans_enc_state->enc_ctx_num));
                        return NT_STATUS_INVALID_HANDLE;
                }

                status = common_decrypt_buffer(cli->trans_enc_state,
                                               (char *)inbuf);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(10, ("common_decrypt_buffer returned %s\n",
                                   nt_errstr(status)));
                        return status;
                }
        }

        mid = SVAL(inbuf, smb_mid);
        num_pending = talloc_array_length(cli->conn.pending);

        for (i=0; i<num_pending; i++) {
                if (mid == cli_smb_req_mid(cli->conn.pending[i])) {
                        break;
                }
        }
        if (i == num_pending) {
                /* Dump unexpected reply */
                return NT_STATUS_RETRY;
        }

        oplock_break = false;

        if (mid == 0xffff) {
                /*
                 * Paranoia checks that this is really an oplock break request.
                 */
                oplock_break = (smb_len(inbuf) == 51); /* hdr + 8 words */
                oplock_break &= ((CVAL(inbuf, smb_flg) & FLAG_REPLY) == 0);
                oplock_break &= (CVAL(inbuf, smb_com) == SMBlockingX);
                oplock_break &= (SVAL(inbuf, smb_vwv6) == 0);
                oplock_break &= (SVAL(inbuf, smb_vwv7) == 0);

                if (!oplock_break) {
                        /* Dump unexpected reply */
                        return NT_STATUS_RETRY;
                }
        }

        req = cli->conn.pending[i];
        state = tevent_req_data(req, struct cli_smb_state);

        if (!oplock_break /* oplock breaks are not signed */
            && !cli_check_sign_mac(cli, (char *)inbuf, state->seqnum+1)) {
                DEBUG(10, ("cli_check_sign_mac failed\n"));
                return NT_STATUS_ACCESS_DENIED;
        }

        if (state->chained_requests != NULL) {
                struct tevent_req **chain = talloc_move(frame,
                                            &state->chained_requests);
                int num_chained = talloc_array_length(chain);

                /*
                 * We steal the inbuf to the chain,
                 * so that it will stay until all
                 * requests of the chain are finished.
                 *
                 * Each requests in the chain will
                 * hold a talloc reference to the chain.
                 * This way we do not expose the talloc_reference()
                 * behavior to the callers.
                 */
                talloc_steal(chain, inbuf);

                for (i=0; i<num_chained; i++) {
                        struct tevent_req **ref;

                        req = chain[i];
                        state = tevent_req_data(req, struct cli_smb_state);

                        cli_smb_req_unset_pending(req);

                        /*
                         * as we finish multiple requests here
                         * we need to defer the callbacks as
                         * they could destroy our current stack state.
                         */
                        tevent_req_defer_callback(req, state->ev);

                        ref = talloc_reference(state, chain);
                        if (tevent_req_nomem(ref, req)) {
                                continue;
                        }

                        state->inbuf = inbuf;
                        state->chain_num = i;
                        state->chain_length = num_chained;

                        tevent_req_done(req);
                }

                return NT_STATUS_RETRY;
        }

        cli_smb_req_unset_pending(req);

        state->inbuf = talloc_move(state, &inbuf);
        state->chain_num = 0;
        state->chain_length = 1;

        if (talloc_array_length(cli->conn.pending) == 0) {
                tevent_req_done(req);
                return NT_STATUS_OK;
        }

        tevent_req_defer_callback(req, state->ev);
        tevent_req_done(req);
        return NT_STATUS_RETRY;
}
NTSTATUS cli_smb_recv(struct tevent_req *req,
		      TALLOC_CTX *mem_ctx, uint8_t **pinbuf,
		      uint8_t min_wct, uint8_t *pwct, uint16_t **pvwv,
		      uint32_t *pnum_bytes, uint8_t **pbytes)
{
        struct cli_smb_state *state = tevent_req_data(
                req, struct cli_smb_state);
        NTSTATUS status = NT_STATUS_OK;
        uint8_t cmd, wct;
        uint16_t num_bytes;
        size_t wct_ofs, bytes_offset;
        int i;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        if (state->inbuf == NULL) {
                if (min_wct != 0) {
                        return NT_STATUS_INVALID_NETWORK_RESPONSE;
                }
                if (pinbuf) {
                        *pinbuf = NULL;
                }
                if (pwct) {
                        *pwct = 0;
                }
                if (pvwv) {
                        *pvwv = NULL;
                }
                if (pnum_bytes) {
                        *pnum_bytes = 0;
                }
                if (pbytes) {
                        *pbytes = NULL;
                }
                /* This was a request without a reply */
                return NT_STATUS_OK;
        }

        wct_ofs = smb_wct;
        cmd = CVAL(state->inbuf, smb_com);

        for (i=0; i<state->chain_num; i++) {
                if (i < state->chain_num-1) {
                        if (cmd == 0xff) {
                                return NT_STATUS_REQUEST_ABORTED;
                        }
                        if (!is_andx_req(cmd)) {
                                return NT_STATUS_INVALID_NETWORK_RESPONSE;
                        }
                }

                if (!have_andx_command((char *)state->inbuf, wct_ofs)) {
                        /*
                         * This request was not completed because a previous
                         * request in the chain had received an error.
                         */
                        return NT_STATUS_REQUEST_ABORTED;
                }

                wct_ofs = SVAL(state->inbuf, wct_ofs + 3);

                /*
                 * Skip the all-present length field. No overflow, we've just
                 * put a 16-bit value into a size_t.
                 */
                wct_ofs += 4;

                if (wct_ofs+2 > talloc_get_size(state->inbuf)) {
                        return NT_STATUS_INVALID_NETWORK_RESPONSE;
                }

                cmd = CVAL(state->inbuf, wct_ofs + 1);
        }

        state->cli->raw_status = cli_pull_raw_error(state->inbuf);
        if (NT_STATUS_IS_DOS(state->cli->raw_status)) {
                uint8_t eclass = NT_STATUS_DOS_CLASS(state->cli->raw_status);
                uint16_t ecode = NT_STATUS_DOS_CODE(state->cli->raw_status);
                /*
                 * TODO: is it really a good idea to do a mapping here?
                 *
                 * The old cli_pull_error() also does it, so I do not change
                 * the behavior yet.
                 */
                status = dos_to_ntstatus(eclass, ecode);
        } else {
                status = state->cli->raw_status;
        }

        if (!have_andx_command((char *)state->inbuf, wct_ofs)) {

                if ((cmd == SMBsesssetupX)
                    && NT_STATUS_EQUAL(
                            status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
                        /*
                         * NT_STATUS_MORE_PROCESSING_REQUIRED is a
                         * valid return code for session setup
                         */
                        goto no_err;
                }

                if (NT_STATUS_IS_ERR(status)) {
                        /*
                         * The last command takes the error code. All
                         * further commands down the requested chain
                         * will get a NT_STATUS_REQUEST_ABORTED.
                         */
                        return status;
                }
        }

no_err:

        wct = CVAL(state->inbuf, wct_ofs);
        bytes_offset = wct_ofs + 1 + wct * sizeof(uint16_t);
        num_bytes = SVAL(state->inbuf, bytes_offset);

        if (wct < min_wct) {
                return NT_STATUS_INVALID_NETWORK_RESPONSE;
        }

        /*
         * wct_ofs is a 16-bit value plus 4, wct is a 8-bit value, num_bytes
         * is a 16-bit value. So bytes_offset being size_t should be far from
         * wrapping around.
         */
        if ((bytes_offset + 2 > talloc_get_size(state->inbuf))
            || (bytes_offset > 0xffff)) {
                return NT_STATUS_INVALID_NETWORK_RESPONSE;
        }

        if (pwct != NULL) {
                *pwct = wct;
        }
        if (pvwv != NULL) {
                *pvwv = (uint16_t *)(state->inbuf + wct_ofs + 1);
        }
        if (pnum_bytes != NULL) {
                *pnum_bytes = num_bytes;
        }
        if (pbytes != NULL) {
                *pbytes = (uint8_t *)state->inbuf + bytes_offset + 2;
        }
        if ((mem_ctx != NULL) && (pinbuf != NULL)) {
                if (state->chain_num == state->chain_length-1) {
                        *pinbuf = talloc_move(mem_ctx, &state->inbuf);
                } else {
                        *pinbuf = state->inbuf;
                }
        }

        return status;
}
size_t cli_smb_wct_ofs(struct tevent_req **reqs, int num_reqs)
{
        size_t wct_ofs;
        int i;

        wct_ofs = smb_wct - 4;

        for (i=0; i<num_reqs; i++) {
                struct cli_smb_state *state;
                state = tevent_req_data(reqs[i], struct cli_smb_state);
                wct_ofs += iov_len(state->iov+1, state->iov_count-1);
                wct_ofs = (wct_ofs + 3) & ~3;
        }
        return wct_ofs;
}
NTSTATUS cli_smb_chain_send(struct tevent_req **reqs, int num_reqs)
{
        struct cli_smb_state *first_state = tevent_req_data(
                reqs[0], struct cli_smb_state);
        struct cli_smb_state *last_state = tevent_req_data(
                reqs[num_reqs-1], struct cli_smb_state);
        struct cli_smb_state *state;
        size_t wct_offset;
        size_t chain_padding = 0;
        int i, iovlen;
        struct iovec *iov = NULL;
        struct iovec *this_iov;
        NTSTATUS status;

        iovlen = 0;
        for (i=0; i<num_reqs; i++) {
                state = tevent_req_data(reqs[i], struct cli_smb_state);
                iovlen += state->iov_count;
        }

        iov = talloc_array(last_state, struct iovec, iovlen);
        if (iov == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        first_state->chained_requests = (struct tevent_req **)talloc_memdup(
                last_state, reqs, sizeof(*reqs) * num_reqs);
        if (first_state->chained_requests == NULL) {
                TALLOC_FREE(iov);
                return NT_STATUS_NO_MEMORY;
        }

        wct_offset = smb_wct - 4;
        this_iov = iov;

        for (i=0; i<num_reqs; i++) {
                size_t next_padding = 0;
                uint16_t *vwv;

                state = tevent_req_data(reqs[i], struct cli_smb_state);

                if (i < num_reqs-1) {
                        if (!is_andx_req(CVAL(state->header, smb_com))
                            || CVAL(state->header, smb_wct) < 2) {
                                TALLOC_FREE(iov);
                                TALLOC_FREE(first_state->chained_requests);
                                return NT_STATUS_INVALID_PARAMETER;
                        }
                }

                wct_offset += iov_len(state->iov+1, state->iov_count-1) + 1;
                if ((wct_offset % 4) != 0) {
                        next_padding = 4 - (wct_offset % 4);
                }
                wct_offset += next_padding;
                vwv = state->vwv;

                if (i < num_reqs-1) {
                        struct cli_smb_state *next_state = tevent_req_data(
                                reqs[i+1], struct cli_smb_state);
                        SCVAL(vwv+0, 0, CVAL(next_state->header, smb_com));
                        SCVAL(vwv+0, 1, 0);
                        SSVAL(vwv+1, 0, wct_offset);
                } else if (is_andx_req(CVAL(state->header, smb_com))) {
                        /* properly end the chain */
                        SCVAL(vwv+0, 0, 0xff);
                        SCVAL(vwv+0, 1, 0xff);
                        SSVAL(vwv+1, 0, 0);
                }

                if (i == 0) {
                        this_iov[0] = state->iov[0];
                } else {
                        /*
                         * This one is a bit subtle. We have to add
                         * chain_padding bytes between the requests, and we
                         * have to also include the wct field of the
                         * subsequent requests. We use the subsequent header
                         * for the padding, it contains the wct field in its
                         * last byte.
                         */
                        this_iov[0].iov_len = chain_padding+1;
                        this_iov[0].iov_base = (void *)&state->header[
                                sizeof(state->header) - this_iov[0].iov_len];
                        memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
                }
                memcpy(this_iov+1, state->iov+1,
                       sizeof(struct iovec) * (state->iov_count-1));
                this_iov += state->iov_count;
                chain_padding = next_padding;
        }

        status = cli_smb_req_iov_send(reqs[0], last_state, iov, iovlen);
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(iov);
                TALLOC_FREE(first_state->chained_requests);
                return status;
        }

        return NT_STATUS_OK;
}
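/*
 * Usage sketch (illustrative only, not part of the original file): submit
 * two requests as a single AndX chain. The commands, word counts and
 * vwv/bytes buffers are hypothetical; real callers fill in the proper
 * fields for each hop and collect the results per request with
 * cli_smb_recv() from the individual callbacks.
 *
 *	struct tevent_req *reqs[2];
 *	NTSTATUS status;
 *
 *	reqs[0] = cli_smb_req_create(mem_ctx, ev, cli, SMBopenX, 0,
 *				     15, open_vwv, 1, &open_bytes);
 *	reqs[1] = cli_smb_req_create(mem_ctx, ev, cli, SMBreadX, 0,
 *				     12, read_vwv, 0, NULL);
 *	if ((reqs[0] == NULL) || (reqs[1] == NULL)) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	status = cli_smb_chain_send(reqs, 2);
 */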
bool cli_has_async_calls(struct cli_state *cli)
{
        return ((tevent_queue_length(cli->conn.outgoing) != 0)
                || (talloc_array_length(cli->conn.pending) != 0));
}