/*
   Unix SMB/CIFS implementation.
   Infrastructure for async SMB client requests
   Copyright (C) Volker Lendecke 2008

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/async_req/async_sock.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/util/tevent_unix.h"
#include "async_smb.h"
#include "../libcli/smb/smb_seal.h"
#include "libsmb/nmblib.h"
#include "../libcli/smb/read_smb.h"
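
/*
 * Pull the raw error status out of an incoming SMB header. Depending on
 * FLAGS2_32_BIT_ERROR_CODES the server sent either an NTSTATUS value or a
 * DOS error class/code pair.
 */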
static NTSTATUS cli_pull_raw_error(const uint8_t *buf)
{
	const uint8_t *hdr = buf + NBT_HDR_SIZE;
	uint32_t flags2 = SVAL(hdr, HDR_FLG2);
	NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));

	if (NT_STATUS_IS_OK(status)) {
		return NT_STATUS_OK;
	}

	if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
		return status;
	}

	return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
}
/**
 * Figure out if there is an andx command behind the current one
 * @param[in] buf	The smb buffer to look at
 * @param[in] ofs	The offset to the wct field that is followed by the cmd
 * @param[in] cmd	The command in front of the offset
 * @retval Is there a command following?
 */

static bool have_andx_command(const char *buf, uint16_t ofs, uint8_t cmd)
{
	uint8_t wct;
	size_t buflen = talloc_get_size(buf);

	if (!is_andx_req(cmd)) {
		return false;
	}
	if ((ofs == buflen-1) || (ofs == buflen)) {
		return false;
	}

	wct = CVAL(buf, ofs);
	if (wct < 2) {
		/*
		 * Not enough space for the command and a following pointer
		 */
		return false;
	}
	return (CVAL(buf, ofs+1) != 0xff);
}
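
/*
 * Per-request state for an async SMB call. It keeps the fixed header, the
 * caller's vwv and data iovecs, and the bookkeeping needed for signing,
 * encryption and AndX chaining.
 */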
struct cli_smb_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint8_t header[smb_wct+1]; /* Space for the header including the wct */

	/*
	 * For normal requests, cli_smb_req_send chooses a mid. Secondary
	 * trans requests need to use the mid of the primary request, so we
	 * need a place to store it. Assume it's set if != 0.
	 */
	uint16_t mid;

	uint16_t *vwv;
	uint8_t bytecount_buf[2];

	struct iovec iov[MAX_SMB_IOV+3];
	int iov_count;

	bool one_way;

	uint8_t *inbuf;
	uint32_t seqnum;
	int chain_num;
	int chain_length;
	struct tevent_req **chained_requests;
};
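
/*
 * Pick a multiplex id for a new request: skip the reserved values 0 and
 * 0xffff and any mid that is still in use by a pending request.
 */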
static uint16_t cli_alloc_mid(struct cli_state *cli)
{
	int num_pending = talloc_array_length(cli->conn.pending);
	uint16_t result;

	while (true) {
		int i;

		result = cli->conn.smb1.mid++;
		if ((result == 0) || (result == 0xffff)) {
			continue;
		}

		for (i=0; i<num_pending; i++) {
			if (result == cli_smb_req_mid(cli->conn.pending[i])) {
				break;
			}
		}

		if (i == num_pending) {
			return result;
		}
	}
}
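
/*
 * Remove a request from cli->conn.pending. If it was the last one, also
 * tear down the read_smb request that watches the socket.
 */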
void cli_smb_req_unset_pending(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	struct cli_state *cli = state->cli;
	int num_pending = talloc_array_length(cli->conn.pending);
	int i;

	if (state->mid != 0) {
		/*
		 * This is a [nt]trans[2] request which waits
		 * for more than one reply.
		 */
		return;
	}

	talloc_set_destructor(req, NULL);
	if (num_pending == 1) {
		/*
		 * The pending read_smb tevent_req is a child of
		 * cli->pending. So if nothing is pending anymore, we need to
		 * delete the socket read fde.
		 */
		TALLOC_FREE(cli->conn.pending);
		cli->conn.read_smb_req = NULL;
		return;
	}

	for (i=0; i<num_pending; i++) {
		if (req == cli->conn.pending[i]) {
			break;
		}
	}
	if (i == num_pending) {
		/*
		 * Something's seriously broken. Just returning here is the
		 * right thing nevertheless, the point of this routine is to
		 * remove ourselves from cli->conn.pending.
		 */
		return;
	}

	/*
	 * Remove ourselves from the cli->conn.pending array
	 */
	for (; i < (num_pending - 1); i++) {
		cli->conn.pending[i] = cli->conn.pending[i+1];
	}

	/*
	 * No NULL check here, we're shrinking by sizeof(void *), and
	 * talloc_realloc just adjusts the size for this.
	 */
	cli->conn.pending = talloc_realloc(NULL, cli->conn.pending,
					   struct tevent_req *,
					   num_pending - 1);
}
static int cli_smb_req_destructor(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);

	/*
	 * Make sure we really remove it from
	 * the pending array on destruction.
	 */
	state->mid = 0;
	cli_smb_req_unset_pending(req);
	return 0;
}
static bool cli_state_receive_next(struct cli_state *cli);
static void cli_state_notify_pending(struct cli_state *cli, NTSTATUS status);
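
/*
 * Add a request to cli->conn.pending and make sure a read_smb request is
 * outstanding so that the reply can be dispatched.
 */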
bool cli_smb_req_set_pending(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	struct cli_state *cli;
	struct tevent_req **pending;
	int num_pending;

	cli = state->cli;

	num_pending = talloc_array_length(cli->conn.pending);

	pending = talloc_realloc(cli, cli->conn.pending, struct tevent_req *,
				 num_pending+1);
	if (pending == NULL) {
		return false;
	}
	pending[num_pending] = req;
	cli->conn.pending = pending;
	talloc_set_destructor(req, cli_smb_req_destructor);

	if (!cli_state_receive_next(cli)) {
		/*
		 * the caller should notify the current request
		 *
		 * And all other pending requests get notified
		 * by cli_state_notify_pending().
		 */
		cli_smb_req_unset_pending(req);
		cli_state_notify_pending(cli, NT_STATUS_NO_MEMORY);
		return false;
	}

	return true;
}
static void cli_smb_received(struct tevent_req *subreq);
static NTSTATUS cli_state_dispatch_smb1(struct cli_state *cli,
					TALLOC_CTX *frame,
					uint8_t *inbuf);
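
/*
 * Make sure exactly one read_smb request is outstanding whenever there are
 * pending requests waiting for a reply.
 */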
static bool cli_state_receive_next(struct cli_state *cli)
{
	size_t num_pending = talloc_array_length(cli->conn.pending);
	struct tevent_req *req;
	struct cli_smb_state *state;

	if (cli->conn.read_smb_req != NULL) {
		return true;
	}

	if (num_pending == 0) {
		return true;
	}

	req = cli->conn.pending[0];
	state = tevent_req_data(req, struct cli_smb_state);

	cli->conn.dispatch_incoming = cli_state_dispatch_smb1;

	/*
	 * We're the first ones, add the read_smb request that waits for the
	 * answer from the server
	 */
	cli->conn.read_smb_req = read_smb_send(cli->conn.pending, state->ev,
					       cli->conn.fd);
	if (cli->conn.read_smb_req == NULL) {
		return false;
	}
	tevent_req_set_callback(cli->conn.read_smb_req, cli_smb_received, cli);
	return true;
}
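
/*
 * Fail all pending requests with "status" after a fatal transport error and
 * disconnect the socket.
 */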
static void cli_state_notify_pending(struct cli_state *cli, NTSTATUS status)
{
	cli_state_disconnect(cli);

	/*
	 * Cancel all pending requests. We do not do a for-loop walking
	 * cli->conn.pending because that array changes in
	 * cli_smb_req_destructor().
	 */
	while (talloc_array_length(cli->conn.pending) > 0) {
		struct tevent_req *req;
		struct cli_smb_state *state;

		req = cli->conn.pending[0];
		state = tevent_req_data(req, struct cli_smb_state);

		/*
		 * We're dead. No point waiting for trans2
		 * replies.
		 */
		state->mid = 0;

		cli_smb_req_unset_pending(req);

		/*
		 * we need to defer the callback, because we may notify more
		 * than one caller.
		 */
		tevent_req_defer_callback(req, state->ev);
		tevent_req_nterror(req, status);
	}
}
/*
 * Fetch a smb request's mid. Only valid after the request has been sent by
 * cli_smb_req_send().
 */
uint16_t cli_smb_req_mid(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);

	if (state->mid != 0) {
		return state->mid;
	}

	return SVAL(state->header, smb_mid);
}
void cli_smb_req_set_mid(struct tevent_req *req, uint16_t mid)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	state->mid = mid;
}

uint32_t cli_smb_req_seqnum(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	return state->seqnum;
}

void cli_smb_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	state->seqnum = seqnum;
}
static size_t iov_len(const struct iovec *iov, int count)
{
	size_t result = 0;
	int i;

	for (i=0; i<count; i++) {
		result += iov[i].iov_len;
	}
	return result;
}
static uint8_t *iov_concat(TALLOC_CTX *mem_ctx, const struct iovec *iov,
			   int count)
{
	size_t len = iov_len(iov, count);
	size_t copied;
	uint8_t *buf;
	int i;

	buf = talloc_array(mem_ctx, uint8_t, len);
	if (buf == NULL) {
		return NULL;
	}

	copied = 0;
	for (i=0; i<count; i++) {
		memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
		copied += iov[i].iov_len;
	}
	return buf;
}
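
/*
 * Create a request without sending it: fill in the SMB header and set up the
 * iovec array (header, vwv, byte count, caller data) for a later
 * cli_smb_req_send() or cli_smb_chain_send().
 */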
struct tevent_req *cli_smb_req_create(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct cli_state *cli,
				      uint8_t smb_command,
				      uint8_t additional_flags,
				      uint8_t wct, uint16_t *vwv,
				      int iov_count,
				      struct iovec *bytes_iov)
{
	struct tevent_req *result;
	struct cli_smb_state *state;
	struct timeval endtime;

	if (iov_count > MAX_SMB_IOV) {
		/*
		 * Should not happen :-)
		 */
		return NULL;
	}

	result = tevent_req_create(mem_ctx, &state, struct cli_smb_state);
	if (result == NULL) {
		return NULL;
	}

	state->ev = ev;
	state->cli = cli;
	state->mid = 0;		/* Set to auto-choose in cli_smb_req_send */
	state->chain_num = 0;
	state->chained_requests = NULL;

	cli_setup_packet_buf(cli, (char *)state->header);
	SCVAL(state->header, smb_com, smb_command);
	SSVAL(state->header, smb_tid, cli->smb1.tid);
	SCVAL(state->header, smb_wct, wct);

	state->vwv = vwv;

	SSVAL(state->bytecount_buf, 0, iov_len(bytes_iov, iov_count));

	state->iov[0].iov_base = (void *)state->header;
	state->iov[0].iov_len  = sizeof(state->header);
	state->iov[1].iov_base = (void *)state->vwv;
	state->iov[1].iov_len  = wct * sizeof(uint16_t);
	state->iov[2].iov_base = (void *)state->bytecount_buf;
	state->iov[2].iov_len  = sizeof(uint16_t);

	if (iov_count != 0) {
		memcpy(&state->iov[3], bytes_iov,
		       iov_count * sizeof(*bytes_iov));
	}
	state->iov_count = iov_count + 3;

	endtime = timeval_current_ofs_msec(cli->timeout);
	if (!tevent_req_set_endtime(result, ev, endtime)) {
		return result;
	}

	switch (smb_command) {
	case SMBtranss:
	case SMBtranss2:
	case SMBnttranss:
	case SMBntcancel:
		/* Requests that never see a reply */
		state->one_way = true;
		break;
	case SMBlockingX:
		if ((wct == 8) &&
		    (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
			state->one_way = true;
		}
		break;
	}

	return result;
}
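
/*
 * Sign an outgoing PDU. The iovec is flattened into a single buffer for
 * cli_calculate_sign_mac(), and the signed header is copied back.
 */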
static NTSTATUS cli_signv(struct cli_state *cli, struct iovec *iov, int count,
			  uint32_t *seqnum)
{
	uint8_t *buf;

	/*
	 * Obvious optimization: Make cli_calculate_sign_mac work with struct
	 * iovec directly. MD5Update would do that just fine.
	 */

	if ((count <= 0) || (iov[0].iov_len < smb_wct)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	buf = iov_concat(talloc_tos(), iov, count);
	if (buf == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	cli_calculate_sign_mac(cli, (char *)buf, seqnum);
	memcpy(iov[0].iov_base, buf, iov[0].iov_len);

	TALLOC_FREE(buf);
	return NT_STATUS_OK;
}
static void cli_smb_sent(struct tevent_req *subreq);
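
/*
 * Put a request on the wire: stamp the mid, set the NBT length, sign and (if
 * negotiated) encrypt the PDU, then hand it to writev_send() on the
 * connection's outgoing queue.
 */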
static NTSTATUS cli_smb_req_iov_send(struct tevent_req *req,
				     struct cli_smb_state *state,
				     struct iovec *iov, int iov_count)
{
	struct tevent_req *subreq;
	NTSTATUS status;

	if (!cli_state_is_connected(state->cli)) {
		return NT_STATUS_CONNECTION_DISCONNECTED;
	}

	if (iov[0].iov_len < smb_wct) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (state->mid != 0) {
		SSVAL(iov[0].iov_base, smb_mid, state->mid);
	} else {
		uint16_t mid = cli_alloc_mid(state->cli);
		SSVAL(iov[0].iov_base, smb_mid, mid);
	}

	smb_setlen_nbt((char *)iov[0].iov_base, iov_len(iov, iov_count) - 4);

	status = cli_signv(state->cli, iov, iov_count, &state->seqnum);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (cli_state_encryption_on(state->cli)) {
		char *buf, *enc_buf;

		buf = (char *)iov_concat(talloc_tos(), iov, iov_count);
		if (buf == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		status = common_encrypt_buffer(state->cli->trans_enc_state,
					       (char *)buf, &enc_buf);
		TALLOC_FREE(buf);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("Error in encrypting client message: %s\n",
				  nt_errstr(status)));
			return status;
		}
		buf = (char *)talloc_memdup(state, enc_buf,
					    smb_len_nbt(enc_buf)+4);
		SAFE_FREE(enc_buf);
		if (buf == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		/* The encrypted PDU replaces the whole iovec */
		iov[0].iov_base = (void *)buf;
		iov[0].iov_len = talloc_get_size(buf);
		iov_count = 1;
	}

	subreq = writev_send(state, state->ev, state->cli->conn.outgoing,
			     state->cli->conn.fd, false, iov, iov_count);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, cli_smb_sent, req);
	return NT_STATUS_OK;
}
NTSTATUS cli_smb_req_send(struct tevent_req *req)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);

	if (!tevent_req_is_in_progress(req)) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	return cli_smb_req_iov_send(req, state, state->iov, state->iov_count);
}
struct tevent_req *cli_smb_send(TALLOC_CTX *mem_ctx,
				struct tevent_context *ev,
				struct cli_state *cli,
				uint8_t smb_command,
				uint8_t additional_flags,
				uint8_t wct, uint16_t *vwv,
				uint32_t num_bytes,
				const uint8_t *bytes)
{
	struct tevent_req *req;
	struct iovec iov;
	NTSTATUS status;

	iov.iov_base = discard_const_p(void, bytes);
	iov.iov_len = num_bytes;

	req = cli_smb_req_create(mem_ctx, ev, cli, smb_command,
				 additional_flags, wct, vwv, 1, &iov);
	if (req == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}
	status = cli_smb_req_send(req);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}
static void cli_smb_sent(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	ssize_t nwritten;
	int err;

	nwritten = writev_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (nwritten == -1) {
		NTSTATUS status = map_nt_error_from_unix_common(err);
		cli_state_notify_pending(state->cli, status);
		return;
	}

	if (state->one_way) {
		state->inbuf = NULL;
		tevent_req_done(req);
		return;
	}

	if (!cli_smb_req_set_pending(req)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}
}
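
/*
 * A full SMB PDU has arrived via read_smb. Hand it to the dispatch function
 * and, unless it told us to stop, ask for the next PDU.
 */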
static void cli_smb_received(struct tevent_req *subreq)
{
	struct cli_state *cli = tevent_req_callback_data(
		subreq, struct cli_state);
	TALLOC_CTX *frame = talloc_stackframe();
	NTSTATUS status;
	uint8_t *inbuf;
	ssize_t received;
	int err;

	if (subreq != cli->conn.read_smb_req) {
		DEBUG(1, ("Internal error: cli_smb_received called with "
			  "unexpected subreq\n"));
		status = NT_STATUS_INTERNAL_ERROR;
		cli_state_notify_pending(cli, status);
		TALLOC_FREE(frame);
		return;
	}

	received = read_smb_recv(subreq, frame, &inbuf, &err);
	TALLOC_FREE(subreq);
	cli->conn.read_smb_req = NULL;
	if (received == -1) {
		status = map_nt_error_from_unix_common(err);
		cli_state_notify_pending(cli, status);
		TALLOC_FREE(frame);
		return;
	}

	status = cli->conn.dispatch_incoming(cli, frame, inbuf);
	TALLOC_FREE(frame);
	if (NT_STATUS_IS_OK(status)) {
		/*
		 * We should not do any more processing
		 * as the dispatch function called
		 * tevent_req_done().
		 */
		return;
	} else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got an error, so notify all pending requests
		 */
		cli_state_notify_pending(cli, status);
		return;
	}

	/*
	 * We got NT_STATUS_RETRY, so we may ask for a
	 * next incoming pdu.
	 */
	if (!cli_state_receive_next(cli)) {
		cli_state_notify_pending(cli, NT_STATUS_NO_MEMORY);
	}
}
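
/*
 * Match an incoming SMB1 PDU to a pending request by mid, verify signing,
 * handle oplock breaks and AndX chains, and complete the request(s).
 * Returning NT_STATUS_RETRY asks the caller to receive the next PDU.
 */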
static NTSTATUS cli_state_dispatch_smb1(struct cli_state *cli,
					TALLOC_CTX *frame,
					uint8_t *inbuf)
{
	struct tevent_req *req;
	struct cli_smb_state *state;
	NTSTATUS status;
	size_t num_pending;
	size_t i;
	uint16_t mid;
	bool oplock_break;
	const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;

	if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
	    && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
		DEBUG(10, ("Got non-SMB PDU\n"));
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	if (cli_state_encryption_on(cli) && (CVAL(inbuf, 0) == 0)) {
		uint16_t enc_ctx_num;

		status = get_enc_ctx_num(inbuf, &enc_ctx_num);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("get_enc_ctx_num returned %s\n",
				   nt_errstr(status)));
			return status;
		}

		if (enc_ctx_num != cli->trans_enc_state->enc_ctx_num) {
			DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
				   enc_ctx_num,
				   cli->trans_enc_state->enc_ctx_num));
			return NT_STATUS_INVALID_HANDLE;
		}

		status = common_decrypt_buffer(cli->trans_enc_state,
					       (char *)inbuf);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("common_decrypt_buffer returned %s\n",
				   nt_errstr(status)));
			return status;
		}
	}

	mid = SVAL(inhdr, HDR_MID);
	num_pending = talloc_array_length(cli->conn.pending);

	for (i=0; i<num_pending; i++) {
		if (mid == cli_smb_req_mid(cli->conn.pending[i])) {
			break;
		}
	}
	if (i == num_pending) {
		/* Dump unexpected reply */
		return NT_STATUS_RETRY;
	}

	oplock_break = false;

	if (mid == 0xffff) {
		/*
		 * Paranoia checks that this is really an oplock break request.
		 */
		oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
		oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
		oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
		oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
		oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);

		if (!oplock_break) {
			/* Dump unexpected reply */
			return NT_STATUS_RETRY;
		}
	}

	req = cli->conn.pending[i];
	state = tevent_req_data(req, struct cli_smb_state);

	if (!oplock_break /* oplock breaks are not signed */
	    && !cli_check_sign_mac(cli, (char *)inbuf, state->seqnum+1)) {
		DEBUG(10, ("cli_check_sign_mac failed\n"));
		return NT_STATUS_ACCESS_DENIED;
	}

	if (state->chained_requests != NULL) {
		struct tevent_req **chain = talloc_move(frame,
					    &state->chained_requests);
		int num_chained = talloc_array_length(chain);

		/*
		 * We steal the inbuf to the chain,
		 * so that it will stay until all
		 * requests of the chain are finished.
		 *
		 * Each request in the chain will
		 * hold a talloc reference to the chain.
		 * This way we do not expose the talloc_reference()
		 * behavior to the callers.
		 */
		talloc_steal(chain, inbuf);

		for (i=0; i<num_chained; i++) {
			struct tevent_req **ref;

			req = chain[i];
			state = tevent_req_data(req, struct cli_smb_state);

			cli_smb_req_unset_pending(req);

			/*
			 * as we finish multiple requests here
			 * we need to defer the callbacks as
			 * they could destroy our current stack state.
			 */
			tevent_req_defer_callback(req, state->ev);

			ref = talloc_reference(state, chain);
			if (tevent_req_nomem(ref, req)) {
				continue;
			}

			state->inbuf = inbuf;
			state->chain_num = i;
			state->chain_length = num_chained;

			tevent_req_done(req);
		}

		return NT_STATUS_RETRY;
	}

	cli_smb_req_unset_pending(req);

	state->inbuf = talloc_move(state, &inbuf);
	state->chain_num = 0;
	state->chain_length = 1;

	if (talloc_array_length(cli->conn.pending) == 0) {
		tevent_req_done(req);
		return NT_STATUS_OK;
	}

	tevent_req_defer_callback(req, state->ev);
	tevent_req_done(req);
	return NT_STATUS_RETRY;
}
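
/*
 * Parse the reply for one request (possibly one link of an AndX chain): walk
 * the chain up to our position, extract the status, and return pointers into
 * the reply buffer for wct/vwv/bytes.
 */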
NTSTATUS cli_smb_recv(struct tevent_req *req,
		      TALLOC_CTX *mem_ctx, uint8_t **pinbuf,
		      uint8_t min_wct, uint8_t *pwct, uint16_t **pvwv,
		      uint32_t *pnum_bytes, uint8_t **pbytes)
{
	struct cli_smb_state *state = tevent_req_data(
		req, struct cli_smb_state);
	NTSTATUS status = NT_STATUS_OK;
	uint8_t cmd, wct;
	uint16_t num_bytes;
	size_t wct_ofs, bytes_offset;
	int i;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	if (state->inbuf == NULL) {
		if (min_wct != 0) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;
		}
		if (pinbuf) {
			*pinbuf = NULL;
		}
		if (pwct) {
			*pwct = 0;
		}
		if (pvwv) {
			*pvwv = NULL;
		}
		if (pnum_bytes) {
			*pnum_bytes = 0;
		}
		if (pbytes) {
			*pbytes = NULL;
		}
		/* This was a request without a reply */
		return NT_STATUS_OK;
	}

	wct_ofs = smb_wct;
	cmd = CVAL(state->inbuf, smb_com);

	for (i=0; i<state->chain_num; i++) {
		if (i < state->chain_num-1) {
			if (cmd == 0xff) {
				return NT_STATUS_REQUEST_ABORTED;
			}
			if (!is_andx_req(cmd)) {
				return NT_STATUS_INVALID_NETWORK_RESPONSE;
			}
		}

		if (!have_andx_command((char *)state->inbuf, wct_ofs, cmd)) {
			/*
			 * This request was not completed because a previous
			 * request in the chain had received an error.
			 */
			return NT_STATUS_REQUEST_ABORTED;
		}

		cmd = CVAL(state->inbuf, wct_ofs + 1);
		wct_ofs = SVAL(state->inbuf, wct_ofs + 3);

		/*
		 * Skip the all-present length field. No overflow, we've just
		 * put a 16-bit value into a size_t.
		 */
		wct_ofs += 4;

		if (wct_ofs+2 > talloc_get_size(state->inbuf)) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;
		}
	}

	state->cli->raw_status = cli_pull_raw_error(state->inbuf);
	if (NT_STATUS_IS_DOS(state->cli->raw_status) &&
	    state->cli->map_dos_errors) {
		uint8_t eclass = NT_STATUS_DOS_CLASS(state->cli->raw_status);
		uint16_t ecode = NT_STATUS_DOS_CODE(state->cli->raw_status);
		/*
		 * TODO: is it really a good idea to do a mapping here?
		 *
		 * The old cli_pull_error() also does it, so I do not change
		 * the behavior yet.
		 */
		status = dos_to_ntstatus(eclass, ecode);
	} else {
		status = state->cli->raw_status;
	}

	if (!have_andx_command((char *)state->inbuf, wct_ofs, cmd)) {

		if ((cmd == SMBsesssetupX)
		    && NT_STATUS_EQUAL(
			    status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
			/*
			 * NT_STATUS_MORE_PROCESSING_REQUIRED is a
			 * valid return code for session setup
			 */
			goto no_err;
		}

		if (NT_STATUS_IS_ERR(status)) {
			/*
			 * The last command takes the error code. All
			 * further commands down the requested chain
			 * will get a NT_STATUS_REQUEST_ABORTED.
			 */
			return status;
		}
	} else {
		/*
		 * Only the last request in the chain gets the returned
		 * status.
		 */
		status = NT_STATUS_OK;
	}

no_err:

	wct = CVAL(state->inbuf, wct_ofs);
	bytes_offset = wct_ofs + 1 + wct * sizeof(uint16_t);
	num_bytes = SVAL(state->inbuf, bytes_offset);

	if (wct < min_wct) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	/*
	 * wct_ofs is a 16-bit value plus 4, wct is a 8-bit value, num_bytes
	 * is a 16-bit value. So bytes_offset being size_t should be far from
	 * wrapping around.
	 */
	if ((bytes_offset + 2 > talloc_get_size(state->inbuf))
	    || (bytes_offset > 0xffff)) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	if (pwct != NULL) {
		*pwct = wct;
	}
	if (pvwv != NULL) {
		*pvwv = (uint16_t *)(state->inbuf + wct_ofs + 1);
	}
	if (pnum_bytes != NULL) {
		*pnum_bytes = num_bytes;
	}
	if (pbytes != NULL) {
		*pbytes = (uint8_t *)state->inbuf + bytes_offset + 2;
	}
	if ((mem_ctx != NULL) && (pinbuf != NULL)) {
		if (state->chain_num == state->chain_length-1) {
			*pinbuf = talloc_move(mem_ctx, &state->inbuf);
		} else {
			*pinbuf = state->inbuf;
		}
	}

	return status;
}
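
/*
 * Compute where the wct field of a request appended behind "reqs" would end
 * up, padding each chain link to a 4-byte boundary.
 */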
size_t cli_smb_wct_ofs(struct tevent_req **reqs, int num_reqs)
{
	size_t wct_ofs;
	int i;

	wct_ofs = smb_wct - 4;

	for (i=0; i<num_reqs; i++) {
		struct cli_smb_state *state;
		state = tevent_req_data(reqs[i], struct cli_smb_state);
		wct_ofs += iov_len(state->iov+1, state->iov_count-1);
		wct_ofs = (wct_ofs + 3) & ~3;
	}
	return wct_ofs;
}
NTSTATUS cli_smb_chain_send(struct tevent_req **reqs, int num_reqs)
{
	struct cli_smb_state *first_state = tevent_req_data(
		reqs[0], struct cli_smb_state);
	struct cli_smb_state *last_state = tevent_req_data(
		reqs[num_reqs-1], struct cli_smb_state);
	struct cli_smb_state *state;
	size_t wct_offset;
	size_t chain_padding = 0;
	int i, iovlen = 0;
	struct iovec *iov = NULL;
	struct iovec *this_iov;
	NTSTATUS status;

	for (i=0; i<num_reqs; i++) {
		if (!tevent_req_is_in_progress(reqs[i])) {
			return NT_STATUS_INTERNAL_ERROR;
		}

		state = tevent_req_data(reqs[i], struct cli_smb_state);
		iovlen += state->iov_count;
	}

	iov = talloc_array(last_state, struct iovec, iovlen);
	if (iov == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	first_state->chained_requests = (struct tevent_req **)talloc_memdup(
		last_state, reqs, sizeof(*reqs) * num_reqs);
	if (first_state->chained_requests == NULL) {
		TALLOC_FREE(iov);
		return NT_STATUS_NO_MEMORY;
	}

	wct_offset = smb_wct - 4;
	this_iov = iov;

	for (i=0; i<num_reqs; i++) {
		size_t next_padding = 0;
		uint16_t *vwv;

		state = tevent_req_data(reqs[i], struct cli_smb_state);

		if (i < num_reqs-1) {
			if (!is_andx_req(CVAL(state->header, smb_com))
			    || CVAL(state->header, smb_wct) < 2) {
				TALLOC_FREE(iov);
				TALLOC_FREE(first_state->chained_requests);
				return NT_STATUS_INVALID_PARAMETER;
			}
		}

		wct_offset += iov_len(state->iov+1, state->iov_count-1) + 1;
		if ((wct_offset % 4) != 0) {
			next_padding = 4 - (wct_offset % 4);
		}
		wct_offset += next_padding;
		vwv = state->vwv;

		if (i < num_reqs-1) {
			struct cli_smb_state *next_state = tevent_req_data(
				reqs[i+1], struct cli_smb_state);
			SCVAL(vwv+0, 0, CVAL(next_state->header, smb_com));
			SCVAL(vwv+0, 1, 0);
			SSVAL(vwv+1, 0, wct_offset);
		} else if (is_andx_req(CVAL(state->header, smb_com))) {
			/* properly end the chain */
			SCVAL(vwv+0, 0, 0xff);
			SCVAL(vwv+0, 1, 0xff);
			SSVAL(vwv+1, 0, 0);
		}

		if (i == 0) {
			this_iov[0] = state->iov[0];
		} else {
			/*
			 * This one is a bit subtle. We have to add
			 * chain_padding bytes between the requests, and we
			 * have to also include the wct field of the
			 * subsequent requests. We use the subsequent header
			 * for the padding, it contains the wct field in its
			 * last byte.
			 */
			this_iov[0].iov_len = chain_padding+1;
			this_iov[0].iov_base = (void *)&state->header[
				sizeof(state->header) - this_iov[0].iov_len];
			memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
		}
		memcpy(this_iov+1, state->iov+1,
		       sizeof(struct iovec) * (state->iov_count-1));
		this_iov += state->iov_count;
		chain_padding = next_padding;
	}

	status = cli_smb_req_iov_send(reqs[0], last_state, iov, iovlen);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(first_state->chained_requests);
		return status;
	}

	for (i=0; i < (num_reqs - 1); i++) {
		state = tevent_req_data(reqs[i], struct cli_smb_state);

		state->seqnum = last_state->seqnum;
	}

	return NT_STATUS_OK;
}
bool cli_has_async_calls(struct cli_state *cli)
{
	return ((tevent_queue_length(cli->conn.outgoing) != 0)
		|| (talloc_array_length(cli->conn.pending) != 0));
}