smbXcli: add smbXcli_session infrastructure
[Samba/gebeck_regimport.git] / libcli / smb / smbXcli_base.c
blob 2cb5d449d94ea15dd47f24eb0ca7b50959da121a
1 /*
2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "includes.h"
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
35 struct smbXcli_conn;
36 struct smbXcli_req;
37 struct smbXcli_session;
39 struct smbXcli_conn {
40 int fd;
41 struct sockaddr_storage local_ss;
42 struct sockaddr_storage remote_ss;
43 const char *remote_name;
45 struct tevent_queue *outgoing;
46 struct tevent_req **pending;
47 struct tevent_req *read_smb_req;
49 enum protocol_types protocol;
50 bool allow_signing;
51 bool desire_signing;
52 bool mandatory_signing;
55 * The incoming dispatch function should return:
56 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
57 * - NT_STATUS_OK, if no more processing is desired, e.g.
58 * the dispatch function called
59 * tevent_req_done().
60 * - All other return values disconnect the connection.
62 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
63 TALLOC_CTX *tmp_mem,
64 uint8_t *inbuf);
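	/*
	 * In this file, smb1cli_conn_dispatch_incoming and
	 * smb2cli_conn_dispatch_incoming (further below) implement this
	 * contract for SMB1 and SMB2 respectively; the pointer is set
	 * lazily the first time a request is submitted.
	 */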
66 struct {
67 struct {
68 uint32_t capabilities;
69 uint32_t max_xmit;
70 } client;
72 struct {
73 uint32_t capabilities;
74 uint32_t max_xmit;
75 uint16_t max_mux;
76 uint16_t security_mode;
77 bool readbraw;
78 bool writebraw;
79 bool lockread;
80 bool writeunlock;
81 uint32_t session_key;
82 struct GUID guid;
83 DATA_BLOB gss_blob;
84 uint8_t challenge[8];
85 const char *workgroup;
86 const char *name;
87 int time_zone;
88 NTTIME system_time;
89 } server;
91 uint32_t capabilities;
92 uint32_t max_xmit;
94 uint16_t mid;
96 struct smb_signing_state *signing;
97 struct smb_trans_enc_state *trans_enc;
98 } smb1;
100 struct {
101 struct {
102 uint16_t security_mode;
103 struct GUID guid;
104 } client;
106 struct {
107 uint32_t capabilities;
108 uint16_t security_mode;
109 struct GUID guid;
110 uint32_t max_trans_size;
111 uint32_t max_read_size;
112 uint32_t max_write_size;
113 NTTIME system_time;
114 NTTIME start_time;
115 DATA_BLOB gss_blob;
116 } server;
118 uint64_t mid;
119 uint16_t cur_credits;
120 uint16_t max_credits;
121 } smb2;
123 struct smbXcli_session *sessions;
126 struct smbXcli_session {
127 struct smbXcli_session *prev, *next;
128 struct smbXcli_conn *conn;
130 struct {
131 uint64_t session_id;
132 uint16_t session_flags;
133 DATA_BLOB signing_key;
134 DATA_BLOB session_key;
135 bool should_sign;
136 } smb2;
139 struct smbXcli_req_state {
140 struct tevent_context *ev;
141 struct smbXcli_conn *conn;
143 uint8_t length_hdr[4];
145 bool one_way;
147 uint8_t *inbuf;
149 struct {
150 /* Space for the header including the wct */
151 uint8_t hdr[HDR_VWV];
154 * For normal requests, smb1cli_req_send chooses a mid.
 155  * Secondary trans requests need to use the mid of the primary
156 * request, so we need a place to store it.
157 * Assume it is set if != 0.
159 uint16_t mid;
161 uint16_t *vwv;
162 uint8_t bytecount_buf[2];
164 #define MAX_SMB_IOV 5
165 /* length_hdr, hdr, words, byte_count, buffers */
166 struct iovec iov[1 + 3 + MAX_SMB_IOV];
167 int iov_count;
169 uint32_t seqnum;
170 struct tevent_req **chained_requests;
172 uint8_t recv_cmd;
173 NTSTATUS recv_status;
174 /* always an array of 3 talloc elements */
175 struct iovec *recv_iov;
176 } smb1;
178 struct {
179 const uint8_t *fixed;
180 uint16_t fixed_len;
181 const uint8_t *dyn;
182 uint32_t dyn_len;
184 uint8_t hdr[64];
185 uint8_t pad[7]; /* padding space for compounding */
187 /* always an array of 3 talloc elements */
188 struct iovec *recv_iov;
190 uint16_t credit_charge;
191 } smb2;
194 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
 197  * NT_STATUS_OK means we do not notify the callers
199 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
201 while (conn->sessions) {
202 conn->sessions->conn = NULL;
203 DLIST_REMOVE(conn->sessions, conn->sessions);
206 if (conn->smb1.trans_enc) {
207 common_free_encryption_state(&conn->smb1.trans_enc);
210 return 0;
213 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
214 int fd,
215 const char *remote_name,
216 enum smb_signing_setting signing_state,
217 uint32_t smb1_capabilities,
218 struct GUID *client_guid)
220 struct smbXcli_conn *conn = NULL;
221 void *ss = NULL;
222 struct sockaddr *sa = NULL;
223 socklen_t sa_length;
224 int ret;
226 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
227 if (!conn) {
228 return NULL;
231 conn->remote_name = talloc_strdup(conn, remote_name);
232 if (conn->remote_name == NULL) {
233 goto error;
236 conn->fd = fd;
238 ss = (void *)&conn->local_ss;
239 sa = (struct sockaddr *)ss;
240 sa_length = sizeof(conn->local_ss);
241 ret = getsockname(fd, sa, &sa_length);
242 if (ret == -1) {
243 goto error;
245 ss = (void *)&conn->remote_ss;
246 sa = (struct sockaddr *)ss;
247 sa_length = sizeof(conn->remote_ss);
248 ret = getpeername(fd, sa, &sa_length);
249 if (ret == -1) {
250 goto error;
253 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
254 if (conn->outgoing == NULL) {
255 goto error;
257 conn->pending = NULL;
259 conn->protocol = PROTOCOL_NONE;
261 switch (signing_state) {
262 case SMB_SIGNING_OFF:
263 /* never */
264 conn->allow_signing = false;
265 conn->desire_signing = false;
266 conn->mandatory_signing = false;
267 break;
268 case SMB_SIGNING_DEFAULT:
269 case SMB_SIGNING_IF_REQUIRED:
270 /* if the server requires it */
271 conn->allow_signing = true;
272 conn->desire_signing = false;
273 conn->mandatory_signing = false;
274 break;
275 case SMB_SIGNING_REQUIRED:
276 /* always */
277 conn->allow_signing = true;
278 conn->desire_signing = true;
279 conn->mandatory_signing = true;
280 break;
283 conn->smb1.client.capabilities = smb1_capabilities;
284 conn->smb1.client.max_xmit = UINT16_MAX;
286 conn->smb1.capabilities = conn->smb1.client.capabilities;
287 conn->smb1.max_xmit = 1024;
289 conn->smb1.mid = 1;
291 /* initialise signing */
292 conn->smb1.signing = smb_signing_init(conn,
293 conn->allow_signing,
294 conn->desire_signing,
295 conn->mandatory_signing);
296 if (!conn->smb1.signing) {
297 goto error;
300 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
301 if (conn->mandatory_signing) {
302 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
304 if (client_guid) {
305 conn->smb2.client.guid = *client_guid;
308 conn->smb2.cur_credits = 1;
309 conn->smb2.max_credits = 0;
311 talloc_set_destructor(conn, smbXcli_conn_destructor);
312 return conn;
314 error:
315 TALLOC_FREE(conn);
316 return NULL;
319 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
321 if (conn == NULL) {
322 return false;
325 if (conn->fd == -1) {
326 return false;
329 return true;
332 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
334 return conn->protocol;
337 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
339 if (conn->protocol >= PROTOCOL_SMB2_02) {
340 return true;
343 if (conn->smb1.capabilities & CAP_UNICODE) {
344 return true;
347 return false;
350 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
352 set_socket_options(conn->fd, options);
355 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
357 return &conn->local_ss;
360 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
362 return &conn->remote_ss;
365 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
367 return conn->remote_name;
370 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
372 if (conn->protocol >= PROTOCOL_SMB2_02) {
374 * TODO...
376 return 1;
379 return conn->smb1.server.max_mux;
382 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
384 if (conn->protocol >= PROTOCOL_SMB2_02) {
385 return conn->smb2.server.system_time;
388 return conn->smb1.server.system_time;
391 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
393 if (conn->protocol >= PROTOCOL_SMB2_02) {
394 return &conn->smb2.server.gss_blob;
397 return &conn->smb1.server.gss_blob;
400 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
402 if (conn->protocol >= PROTOCOL_SMB2_02) {
403 return &conn->smb2.server.guid;
406 return &conn->smb1.server.guid;
409 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
411 return conn->smb1.capabilities;
414 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
416 return conn->smb1.max_xmit;
419 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
421 return conn->smb1.server.session_key;
424 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
426 return conn->smb1.server.challenge;
429 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
431 return conn->smb1.server.security_mode;
434 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
436 return conn->smb1.server.time_zone;
439 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
440 const DATA_BLOB user_session_key,
441 const DATA_BLOB response)
443 return smb_signing_activate(conn->smb1.signing,
444 user_session_key,
445 response);
448 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
449 const uint8_t *buf, uint32_t seqnum)
451 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
454 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
456 return smb_signing_is_active(conn->smb1.signing);
459 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
460 struct smb_trans_enc_state *es)
462 /* Replace the old state, if any. */
463 if (conn->smb1.trans_enc) {
464 common_free_encryption_state(&conn->smb1.trans_enc);
466 conn->smb1.trans_enc = es;
469 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
471 return common_encryption_on(conn->smb1.trans_enc);
475 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
477 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
478 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
480 if (NT_STATUS_IS_OK(status)) {
481 return NT_STATUS_OK;
484 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
485 return status;
488 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
492 * Is the SMB command able to hold an AND_X successor
493 * @param[in] cmd The SMB command in question
494 * @retval Can we add a chained request after "cmd"?
496 bool smb1cli_is_andx_req(uint8_t cmd)
498 switch (cmd) {
499 case SMBtconX:
500 case SMBlockingX:
501 case SMBopenX:
502 case SMBreadX:
503 case SMBwriteX:
504 case SMBsesssetupX:
505 case SMBulogoffX:
506 case SMBntcreateX:
507 return true;
508 break;
509 default:
510 break;
513 return false;
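/*
 * Illustrative sketch (not part of this file): callers typically build an
 * AndX chain by creating each request with smb1cli_req_create() and then
 * handing the whole array to smb1cli_req_chain_submit(), e.g. roughly:
 *
 *	reqs[0] = smb1cli_req_create(mem_ctx, ev, conn, SMBsesssetupX, ...);
 *	reqs[1] = smb1cli_req_create(mem_ctx, ev, conn, SMBtconX, ...);
 *	status  = smb1cli_req_chain_submit(reqs, 2);
 *
 * Every command except the last one has to pass smb1cli_is_andx_req();
 * smb1cli_req_chain_submit() below enforces this.
 */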
516 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
518 size_t num_pending = talloc_array_length(conn->pending);
519 uint16_t result;
521 while (true) {
522 size_t i;
524 result = conn->smb1.mid++;
525 if ((result == 0) || (result == 0xffff)) {
526 continue;
529 for (i=0; i<num_pending; i++) {
530 if (result == smb1cli_req_mid(conn->pending[i])) {
531 break;
535 if (i == num_pending) {
536 return result;
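/*
 * Remove a request from conn->pending. [nt]trans[2] secondaries
 * (state->smb1.mid != 0) are skipped here because they still wait for
 * further replies; the request destructor clears the mid first so the
 * entry really goes away on destruction.
 */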
541 void smbXcli_req_unset_pending(struct tevent_req *req)
543 struct smbXcli_req_state *state =
544 tevent_req_data(req,
545 struct smbXcli_req_state);
546 struct smbXcli_conn *conn = state->conn;
547 size_t num_pending = talloc_array_length(conn->pending);
548 size_t i;
550 if (state->smb1.mid != 0) {
552 * This is a [nt]trans[2] request which waits
553 * for more than one reply.
555 return;
558 talloc_set_destructor(req, NULL);
560 if (num_pending == 1) {
562 * The pending read_smb tevent_req is a child of
563 * conn->pending. So if nothing is pending anymore, we need to
564 * delete the socket read fde.
566 TALLOC_FREE(conn->pending);
567 conn->read_smb_req = NULL;
568 return;
571 for (i=0; i<num_pending; i++) {
572 if (req == conn->pending[i]) {
573 break;
576 if (i == num_pending) {
578 * Something's seriously broken. Just returning here is the
579 * right thing nevertheless, the point of this routine is to
580 * remove ourselves from conn->pending.
582 return;
586 * Remove ourselves from the conn->pending array
588 for (; i < (num_pending - 1); i++) {
589 conn->pending[i] = conn->pending[i+1];
593 * No NULL check here, we're shrinking by sizeof(void *), and
594 * talloc_realloc just adjusts the size for this.
596 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
597 num_pending - 1);
598 return;
601 static int smbXcli_req_destructor(struct tevent_req *req)
603 struct smbXcli_req_state *state =
604 tevent_req_data(req,
605 struct smbXcli_req_state);
608 * Make sure we really remove it from
609 * the pending array on destruction.
611 state->smb1.mid = 0;
612 smbXcli_req_unset_pending(req);
613 return 0;
616 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
618 bool smbXcli_req_set_pending(struct tevent_req *req)
620 struct smbXcli_req_state *state =
621 tevent_req_data(req,
622 struct smbXcli_req_state);
623 struct smbXcli_conn *conn;
624 struct tevent_req **pending;
625 size_t num_pending;
627 conn = state->conn;
629 if (!smbXcli_conn_is_connected(conn)) {
630 return false;
633 num_pending = talloc_array_length(conn->pending);
635 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
636 num_pending+1);
637 if (pending == NULL) {
638 return false;
640 pending[num_pending] = req;
641 conn->pending = pending;
642 talloc_set_destructor(req, smbXcli_req_destructor);
644 if (!smbXcli_conn_receive_next(conn)) {
646 * the caller should notify the current request
648 * And all other pending requests get notified
649 * by smbXcli_conn_disconnect().
651 smbXcli_req_unset_pending(req);
652 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
653 return false;
656 return true;
659 static void smbXcli_conn_received(struct tevent_req *subreq);
661 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
663 size_t num_pending = talloc_array_length(conn->pending);
664 struct tevent_req *req;
665 struct smbXcli_req_state *state;
667 if (conn->read_smb_req != NULL) {
668 return true;
671 if (num_pending == 0) {
672 if (conn->smb2.mid < UINT64_MAX) {
673 /* no more pending requests, so we are done for now */
674 return true;
678 * If there are no more SMB2 requests possible,
679 * because we are out of message ids,
680 * we need to disconnect.
682 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
683 return true;
686 req = conn->pending[0];
687 state = tevent_req_data(req, struct smbXcli_req_state);
690 * We're the first ones, add the read_smb request that waits for the
691 * answer from the server
693 conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
694 if (conn->read_smb_req == NULL) {
695 return false;
697 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
698 return true;
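/*
 * Tear down the connection and fail every pending request (including all
 * members of a chained request) with the given status. Passing
 * NT_STATUS_OK suppresses the caller notifications; the connection
 * destructor uses that to drop requests silently.
 */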
701 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
703 if (conn->fd != -1) {
704 close(conn->fd);
706 conn->fd = -1;
709 * Cancel all pending requests. We do not do a for-loop walking
710 * conn->pending because that array changes in
711 * smbXcli_req_unset_pending.
713 while (talloc_array_length(conn->pending) > 0) {
714 struct tevent_req *req;
715 struct smbXcli_req_state *state;
716 struct tevent_req **chain;
717 size_t num_chained;
718 size_t i;
720 req = conn->pending[0];
721 state = tevent_req_data(req, struct smbXcli_req_state);
723 if (state->smb1.chained_requests == NULL) {
725 * We're dead. No point waiting for trans2
726 * replies.
728 state->smb1.mid = 0;
730 smbXcli_req_unset_pending(req);
732 if (NT_STATUS_IS_OK(status)) {
733 /* do not notify the callers */
734 continue;
 738  * we need to defer the callback, because we may notify
 739  * more than one caller.
741 tevent_req_defer_callback(req, state->ev);
742 tevent_req_nterror(req, status);
743 continue;
746 chain = talloc_move(conn, &state->smb1.chained_requests);
747 num_chained = talloc_array_length(chain);
749 for (i=0; i<num_chained; i++) {
750 req = chain[i];
751 state = tevent_req_data(req, struct smbXcli_req_state);
754 * We're dead. No point waiting for trans2
755 * replies.
757 state->smb1.mid = 0;
759 smbXcli_req_unset_pending(req);
761 if (NT_STATUS_IS_OK(status)) {
762 /* do not notify the callers */
763 continue;
 767  * we need to defer the callback, because we may notify
 768  * more than one caller.
770 tevent_req_defer_callback(req, state->ev);
771 tevent_req_nterror(req, status);
773 TALLOC_FREE(chain);
778 * Fetch a smb request's mid. Only valid after the request has been sent by
779 * smb1cli_req_send().
781 uint16_t smb1cli_req_mid(struct tevent_req *req)
783 struct smbXcli_req_state *state =
784 tevent_req_data(req,
785 struct smbXcli_req_state);
787 if (state->smb1.mid != 0) {
788 return state->smb1.mid;
791 return SVAL(state->smb1.hdr, HDR_MID);
794 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
796 struct smbXcli_req_state *state =
797 tevent_req_data(req,
798 struct smbXcli_req_state);
800 state->smb1.mid = mid;
803 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
805 struct smbXcli_req_state *state =
806 tevent_req_data(req,
807 struct smbXcli_req_state);
809 return state->smb1.seqnum;
812 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
814 struct smbXcli_req_state *state =
815 tevent_req_data(req,
816 struct smbXcli_req_state);
818 state->smb1.seqnum = seqnum;
821 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
823 size_t result = 0;
824 int i;
825 for (i=0; i<count; i++) {
826 result += iov[i].iov_len;
828 return result;
831 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
832 const struct iovec *iov,
833 int count)
835 size_t len = smbXcli_iov_len(iov, count);
836 size_t copied;
837 uint8_t *buf;
838 int i;
840 buf = talloc_array(mem_ctx, uint8_t, len);
841 if (buf == NULL) {
842 return NULL;
844 copied = 0;
845 for (i=0; i<count; i++) {
846 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
847 copied += iov[i].iov_len;
849 return buf;
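/*
 * Compute the SMB1 FLAGS/FLAGS2 header fields from the negotiated protocol
 * level and capabilities, then apply the caller's additional/clear masks
 * on top.
 */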
852 static void smb1cli_req_flags(enum protocol_types protocol,
853 uint32_t smb1_capabilities,
854 uint8_t smb_command,
855 uint8_t additional_flags,
856 uint8_t clear_flags,
857 uint8_t *_flags,
858 uint16_t additional_flags2,
859 uint16_t clear_flags2,
860 uint16_t *_flags2)
862 uint8_t flags = 0;
863 uint16_t flags2 = 0;
865 if (protocol >= PROTOCOL_LANMAN1) {
866 flags |= FLAG_CASELESS_PATHNAMES;
867 flags |= FLAG_CANONICAL_PATHNAMES;
870 if (protocol >= PROTOCOL_LANMAN2) {
871 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
872 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
875 if (protocol >= PROTOCOL_NT1) {
876 flags2 |= FLAGS2_IS_LONG_NAME;
878 if (smb1_capabilities & CAP_UNICODE) {
879 flags2 |= FLAGS2_UNICODE_STRINGS;
881 if (smb1_capabilities & CAP_STATUS32) {
882 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
884 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
885 flags2 |= FLAGS2_EXTENDED_SECURITY;
889 flags |= additional_flags;
890 flags &= ~clear_flags;
891 flags2 |= additional_flags2;
892 flags2 &= ~clear_flags2;
894 *_flags = flags;
895 *_flags2 = flags2;
898 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
899 struct tevent_context *ev,
900 struct smbXcli_conn *conn,
901 uint8_t smb_command,
902 uint8_t additional_flags,
903 uint8_t clear_flags,
904 uint16_t additional_flags2,
905 uint16_t clear_flags2,
906 uint32_t timeout_msec,
907 uint32_t pid,
908 uint16_t tid,
909 uint16_t uid,
910 uint8_t wct, uint16_t *vwv,
911 int iov_count,
912 struct iovec *bytes_iov)
914 struct tevent_req *req;
915 struct smbXcli_req_state *state;
916 uint8_t flags = 0;
917 uint16_t flags2 = 0;
919 if (iov_count > MAX_SMB_IOV) {
921 * Should not happen :-)
923 return NULL;
926 req = tevent_req_create(mem_ctx, &state,
927 struct smbXcli_req_state);
928 if (req == NULL) {
929 return NULL;
931 state->ev = ev;
932 state->conn = conn;
934 state->smb1.recv_cmd = 0xFF;
935 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
936 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
937 if (state->smb1.recv_iov == NULL) {
938 TALLOC_FREE(req);
939 return NULL;
942 smb1cli_req_flags(conn->protocol,
943 conn->smb1.capabilities,
944 smb_command,
945 additional_flags,
946 clear_flags,
947 &flags,
948 additional_flags2,
949 clear_flags2,
950 &flags2);
952 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
953 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
954 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
955 SCVAL(state->smb1.hdr, HDR_FLG, flags);
956 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
957 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
958 SSVAL(state->smb1.hdr, HDR_TID, tid);
959 SSVAL(state->smb1.hdr, HDR_PID, pid);
960 SSVAL(state->smb1.hdr, HDR_UID, uid);
961 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
962 SSVAL(state->smb1.hdr, HDR_WCT, wct);
964 state->smb1.vwv = vwv;
966 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
968 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
969 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
970 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
971 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
972 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
973 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
974 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
975 state->smb1.iov[3].iov_len = sizeof(uint16_t);
977 if (iov_count != 0) {
978 memcpy(&state->smb1.iov[4], bytes_iov,
979 iov_count * sizeof(*bytes_iov));
981 state->smb1.iov_count = iov_count + 4;
983 if (timeout_msec > 0) {
984 struct timeval endtime;
986 endtime = timeval_current_ofs_msec(timeout_msec);
987 if (!tevent_req_set_endtime(req, ev, endtime)) {
988 return req;
992 switch (smb_command) {
993 case SMBtranss:
994 case SMBtranss2:
995 case SMBnttranss:
996 case SMBntcancel:
997 state->one_way = true;
998 break;
999 case SMBlockingX:
1000 if ((wct == 8) &&
1001 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1002 state->one_way = true;
1004 break;
1007 return req;
1010 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1011 struct iovec *iov, int iov_count,
1012 uint32_t *seqnum)
1014 uint8_t *buf;
1017 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1018 * iovec directly. MD5Update would do that just fine.
1021 if (iov_count < 4) {
1022 return NT_STATUS_INVALID_PARAMETER_MIX;
1024 if (iov[0].iov_len != NBT_HDR_SIZE) {
1025 return NT_STATUS_INVALID_PARAMETER_MIX;
1027 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1028 return NT_STATUS_INVALID_PARAMETER_MIX;
1030 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1031 return NT_STATUS_INVALID_PARAMETER_MIX;
1033 if (iov[3].iov_len != sizeof(uint16_t)) {
1034 return NT_STATUS_INVALID_PARAMETER_MIX;
1037 buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1038 if (buf == NULL) {
1039 return NT_STATUS_NO_MEMORY;
1042 *seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
1043 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
1044 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1046 TALLOC_FREE(buf);
1047 return NT_STATUS_OK;
1050 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1051 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1052 TALLOC_CTX *tmp_mem,
1053 uint8_t *inbuf);
1055 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1056 struct smbXcli_req_state *state,
1057 struct iovec *iov, int iov_count)
1059 struct tevent_req *subreq;
1060 NTSTATUS status;
1061 uint16_t mid;
1063 if (!smbXcli_conn_is_connected(state->conn)) {
1064 return NT_STATUS_CONNECTION_DISCONNECTED;
1067 if (state->conn->protocol > PROTOCOL_NT1) {
1068 return NT_STATUS_REVISION_MISMATCH;
1071 if (iov_count < 4) {
1072 return NT_STATUS_INVALID_PARAMETER_MIX;
1074 if (iov[0].iov_len != NBT_HDR_SIZE) {
1075 return NT_STATUS_INVALID_PARAMETER_MIX;
1077 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1078 return NT_STATUS_INVALID_PARAMETER_MIX;
1080 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1081 return NT_STATUS_INVALID_PARAMETER_MIX;
1083 if (iov[3].iov_len != sizeof(uint16_t)) {
1084 return NT_STATUS_INVALID_PARAMETER_MIX;
1087 if (state->smb1.mid != 0) {
1088 mid = state->smb1.mid;
1089 } else {
1090 mid = smb1cli_alloc_mid(state->conn);
1092 SSVAL(iov[1].iov_base, HDR_MID, mid);
1094 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1096 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1097 &state->smb1.seqnum);
1099 if (!NT_STATUS_IS_OK(status)) {
1100 return status;
1104  * If we supported multiple encryption contexts
1105  * here we'd look up based on tid.
1107 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1108 char *buf, *enc_buf;
1110 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1111 if (buf == NULL) {
1112 return NT_STATUS_NO_MEMORY;
1114 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1115 (char *)buf, &enc_buf);
1116 TALLOC_FREE(buf);
1117 if (!NT_STATUS_IS_OK(status)) {
1118 DEBUG(0, ("Error in encrypting client message: %s\n",
1119 nt_errstr(status)));
1120 return status;
1122 buf = (char *)talloc_memdup(state, enc_buf,
1123 smb_len_nbt(enc_buf)+4);
1124 SAFE_FREE(enc_buf);
1125 if (buf == NULL) {
1126 return NT_STATUS_NO_MEMORY;
1128 iov[0].iov_base = (void *)buf;
1129 iov[0].iov_len = talloc_get_size(buf);
1130 iov_count = 1;
1133 if (state->conn->dispatch_incoming == NULL) {
1134 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1137 subreq = writev_send(state, state->ev, state->conn->outgoing,
1138 state->conn->fd, false, iov, iov_count);
1139 if (subreq == NULL) {
1140 return NT_STATUS_NO_MEMORY;
1142 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1143 return NT_STATUS_OK;
1146 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1147 struct tevent_context *ev,
1148 struct smbXcli_conn *conn,
1149 uint8_t smb_command,
1150 uint8_t additional_flags,
1151 uint8_t clear_flags,
1152 uint16_t additional_flags2,
1153 uint16_t clear_flags2,
1154 uint32_t timeout_msec,
1155 uint32_t pid,
1156 uint16_t tid,
1157 uint16_t uid,
1158 uint8_t wct, uint16_t *vwv,
1159 uint32_t num_bytes,
1160 const uint8_t *bytes)
1162 struct tevent_req *req;
1163 struct iovec iov;
1164 NTSTATUS status;
1166 iov.iov_base = discard_const_p(void, bytes);
1167 iov.iov_len = num_bytes;
1169 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1170 additional_flags, clear_flags,
1171 additional_flags2, clear_flags2,
1172 timeout_msec,
1173 pid, tid, uid,
1174 wct, vwv, 1, &iov);
1175 if (req == NULL) {
1176 return NULL;
1178 if (!tevent_req_is_in_progress(req)) {
1179 return tevent_req_post(req, ev);
1181 status = smb1cli_req_chain_submit(&req, 1);
1182 if (tevent_req_nterror(req, status)) {
1183 return tevent_req_post(req, ev);
1185 return req;
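/*
 * Illustrative sketch (not part of this file) of the usual tevent pattern
 * around smb1cli_req_send()/smb1cli_req_recv(); the callback name
 * "echo_done" and the local variables are made up for the example:
 *
 *	req = smb1cli_req_send(mem_ctx, ev, conn, SMBecho, 0, 0, 0, 0,
 *			       timeout_msec, pid, tid, uid,
 *			       1, vwv, num_bytes, bytes);
 *	tevent_req_set_callback(req, echo_done, state);
 *	...
 *	static void echo_done(struct tevent_req *subreq)
 *	{
 *		status = smb1cli_req_recv(subreq, state, &recv_iov, NULL,
 *					  &wct, &vwv, NULL, &num_bytes,
 *					  &bytes, NULL, NULL, NULL, 0);
 *	}
 */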
1188 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1190 struct tevent_req *req =
1191 tevent_req_callback_data(subreq,
1192 struct tevent_req);
1193 struct smbXcli_req_state *state =
1194 tevent_req_data(req,
1195 struct smbXcli_req_state);
1196 ssize_t nwritten;
1197 int err;
1199 nwritten = writev_recv(subreq, &err);
1200 TALLOC_FREE(subreq);
1201 if (nwritten == -1) {
1202 NTSTATUS status = map_nt_error_from_unix_common(err);
1203 smbXcli_conn_disconnect(state->conn, status);
1204 return;
1207 if (state->one_way) {
1208 state->inbuf = NULL;
1209 tevent_req_done(req);
1210 return;
1213 if (!smbXcli_req_set_pending(req)) {
1214 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1215 return;
1219 static void smbXcli_conn_received(struct tevent_req *subreq)
1221 struct smbXcli_conn *conn =
1222 tevent_req_callback_data(subreq,
1223 struct smbXcli_conn);
1224 TALLOC_CTX *frame = talloc_stackframe();
1225 NTSTATUS status;
1226 uint8_t *inbuf;
1227 ssize_t received;
1228 int err;
1230 if (subreq != conn->read_smb_req) {
1231  DEBUG(1, ("Internal error: smbXcli_conn_received called with "
1232  "unexpected subreq\n"));
1233 status = NT_STATUS_INTERNAL_ERROR;
1234 smbXcli_conn_disconnect(conn, status);
1235 TALLOC_FREE(frame);
1236 return;
1238 conn->read_smb_req = NULL;
1240 received = read_smb_recv(subreq, frame, &inbuf, &err);
1241 TALLOC_FREE(subreq);
1242 if (received == -1) {
1243 status = map_nt_error_from_unix_common(err);
1244 smbXcli_conn_disconnect(conn, status);
1245 TALLOC_FREE(frame);
1246 return;
1249 status = conn->dispatch_incoming(conn, frame, inbuf);
1250 TALLOC_FREE(frame);
1251 if (NT_STATUS_IS_OK(status)) {
1253 * We should not do any more processing
1254 * as the dispatch function called
1255 * tevent_req_done().
1257 return;
1258 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1260 * We got an error, so notify all pending requests
1262 smbXcli_conn_disconnect(conn, status);
1263 return;
1267  * We got NT_STATUS_RETRY, so we may ask for the
1268  * next incoming PDU.
1270 if (!smbXcli_conn_receive_next(conn)) {
1271 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
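/*
 * Split a received SMB1 PDU into iovecs: one element for the header up to
 * the wct field, then a parameter/data pair for each (possibly
 * AndX-chained) response it contains.
 */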
1275 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1276 struct iovec **piov, int *pnum_iov)
1278 struct iovec *iov;
1279 int num_iov;
1280 size_t buflen;
1281 size_t taken;
1282 size_t remaining;
1283 uint8_t *hdr;
1284 uint8_t cmd;
1285 uint32_t wct_ofs;
1287 buflen = smb_len_nbt(buf);
1288 taken = 0;
1290 hdr = buf + NBT_HDR_SIZE;
1292 if (buflen < MIN_SMB_SIZE) {
1293 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1297 * This returns iovec elements in the following order:
1299 * - SMB header
1301 * - Parameter Block
1302 * - Data Block
1304 * - Parameter Block
1305 * - Data Block
1307 * - Parameter Block
1308 * - Data Block
1310 num_iov = 1;
1312 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1313 if (iov == NULL) {
1314 return NT_STATUS_NO_MEMORY;
1316 iov[0].iov_base = hdr;
1317 iov[0].iov_len = HDR_WCT;
1318 taken += HDR_WCT;
1320 cmd = CVAL(hdr, HDR_COM);
1321 wct_ofs = HDR_WCT;
1323 while (true) {
1324 size_t len = buflen - taken;
1325 struct iovec *cur;
1326 struct iovec *iov_tmp;
1327 uint8_t wct;
1328 uint32_t bcc_ofs;
1329 uint16_t bcc;
1330 size_t needed;
1333 * we need at least WCT and BCC
1335 needed = sizeof(uint8_t) + sizeof(uint16_t);
1336 if (len < needed) {
1337 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1338 __location__, (int)len, (int)needed));
1339 goto inval;
1343 * Now we check if the specified words are there
1345 wct = CVAL(hdr, wct_ofs);
1346 needed += wct * sizeof(uint16_t);
1347 if (len < needed) {
1348 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1349 __location__, (int)len, (int)needed));
1350 goto inval;
1354 * Now we check if the specified bytes are there
1356 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1357 bcc = SVAL(hdr, bcc_ofs);
1358 needed += bcc * sizeof(uint8_t);
1359 if (len < needed) {
1360 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1361 __location__, (int)len, (int)needed));
1362 goto inval;
1366 * we allocate 2 iovec structures for words and bytes
1368 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1369 num_iov + 2);
1370 if (iov_tmp == NULL) {
1371 TALLOC_FREE(iov);
1372 return NT_STATUS_NO_MEMORY;
1374 iov = iov_tmp;
1375 cur = &iov[num_iov];
1376 num_iov += 2;
1378 cur[0].iov_len = wct * sizeof(uint16_t);
1379 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1380 cur[1].iov_len = bcc * sizeof(uint8_t);
1381 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1383 taken += needed;
1385 if (!smb1cli_is_andx_req(cmd)) {
1387  * If the current command does not have AndX chaining
1388 * we are done.
1390 break;
1393 if (wct == 0 && bcc == 0) {
1395 * An empty response also ends the chain,
1396 * most likely with an error.
1398 break;
1401 if (wct < 2) {
1402 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1403 __location__, (int)wct, (int)cmd));
1404 goto inval;
1406 cmd = CVAL(cur[0].iov_base, 0);
1407 if (cmd == 0xFF) {
1409 * If it is the end of the chain we are also done.
1411 break;
1413 wct_ofs = SVAL(cur[0].iov_base, 2);
1415 if (wct_ofs < taken) {
1416 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1418 if (wct_ofs > buflen) {
1419 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1423 * we consumed everything up to the start of the next
1424 * parameter block.
1426 taken = wct_ofs;
1429 remaining = buflen - taken;
1431 if (remaining > 0 && num_iov >= 3) {
1433 * The last DATA block gets the remaining
1434 * bytes, this is needed to support
1435 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1437 iov[num_iov-1].iov_len += remaining;
1440 *piov = iov;
1441 *pnum_iov = num_iov;
1442 return NT_STATUS_OK;
1444 inval:
1445 TALLOC_FREE(iov);
1446 return NT_STATUS_INVALID_NETWORK_RESPONSE;
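/*
 * SMB1 incarnation of conn->dispatch_incoming: decrypt and verify the PDU,
 * match it to a pending request by mid (or treat it as an oplock break),
 * split chained responses and complete the matching tevent requests.
 * Returns NT_STATUS_RETRY when more PDUs should be read.
 */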
1449 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1450 TALLOC_CTX *tmp_mem,
1451 uint8_t *inbuf)
1453 struct tevent_req *req;
1454 struct smbXcli_req_state *state;
1455 NTSTATUS status;
1456 size_t num_pending;
1457 size_t i;
1458 uint8_t cmd;
1459 uint16_t mid;
1460 bool oplock_break;
1461 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1462 struct iovec *iov = NULL;
1463 int num_iov = 0;
1464 struct tevent_req **chain = NULL;
1465 size_t num_chained = 0;
1466 size_t num_responses = 0;
1468 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1469 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1470 DEBUG(10, ("Got non-SMB PDU\n"));
1471 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1475  * If we supported multiple encryption contexts
1476  * here we'd look up based on tid.
1478 if (common_encryption_on(conn->smb1.trans_enc)
1479 && (CVAL(inbuf, 0) == 0)) {
1480 uint16_t enc_ctx_num;
1482 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1483 if (!NT_STATUS_IS_OK(status)) {
1484 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1485 nt_errstr(status)));
1486 return status;
1489 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1490 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1491 enc_ctx_num,
1492 conn->smb1.trans_enc->enc_ctx_num));
1493 return NT_STATUS_INVALID_HANDLE;
1496 status = common_decrypt_buffer(conn->smb1.trans_enc,
1497 (char *)inbuf);
1498 if (!NT_STATUS_IS_OK(status)) {
1499 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1500 nt_errstr(status)));
1501 return status;
1505 mid = SVAL(inhdr, HDR_MID);
1506 num_pending = talloc_array_length(conn->pending);
1508 for (i=0; i<num_pending; i++) {
1509 if (mid == smb1cli_req_mid(conn->pending[i])) {
1510 break;
1513 if (i == num_pending) {
1514 /* Dump unexpected reply */
1515 return NT_STATUS_RETRY;
1518 oplock_break = false;
1520 if (mid == 0xffff) {
1522 * Paranoia checks that this is really an oplock break request.
1524 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1525 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1526 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1527 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1528 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1530 if (!oplock_break) {
1531 /* Dump unexpected reply */
1532 return NT_STATUS_RETRY;
1536 req = conn->pending[i];
1537 state = tevent_req_data(req, struct smbXcli_req_state);
1539 if (!oplock_break /* oplock breaks are not signed */
1540 && !smb_signing_check_pdu(conn->smb1.signing,
1541 inbuf, state->smb1.seqnum+1)) {
1542  DEBUG(10, ("smb_signing_check_pdu failed\n"));
1543 return NT_STATUS_ACCESS_DENIED;
1546 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1547 &iov, &num_iov);
1548 if (!NT_STATUS_IS_OK(status)) {
1549 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1550 nt_errstr(status)));
1551 return status;
1554 cmd = CVAL(inhdr, HDR_COM);
1555 status = smb1cli_pull_raw_error(inhdr);
1557 if (state->smb1.chained_requests == NULL) {
1558 if (num_iov != 3) {
1559 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1562 smbXcli_req_unset_pending(req);
1564 state->smb1.recv_cmd = cmd;
1565 state->smb1.recv_status = status;
1566 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1568 state->smb1.recv_iov[0] = iov[0];
1569 state->smb1.recv_iov[1] = iov[1];
1570 state->smb1.recv_iov[2] = iov[2];
1572 if (talloc_array_length(conn->pending) == 0) {
1573 tevent_req_done(req);
1574 return NT_STATUS_OK;
1577 tevent_req_defer_callback(req, state->ev);
1578 tevent_req_done(req);
1579 return NT_STATUS_RETRY;
1582 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1583 num_chained = talloc_array_length(chain);
1584 num_responses = (num_iov - 1)/2;
1586 if (num_responses > num_chained) {
1587 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1590 for (i=0; i<num_chained; i++) {
1591 size_t iov_idx = 1 + (i*2);
1592 struct iovec *cur = &iov[iov_idx];
1593 uint8_t *inbuf_ref;
1595 req = chain[i];
1596 state = tevent_req_data(req, struct smbXcli_req_state);
1598 smbXcli_req_unset_pending(req);
1601 * as we finish multiple requests here
1602 * we need to defer the callbacks as
1603 * they could destroy our current stack state.
1605 tevent_req_defer_callback(req, state->ev);
1607 if (i >= num_responses) {
1608 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1609 continue;
1612 state->smb1.recv_cmd = cmd;
1614 if (i == (num_responses - 1)) {
1616 * The last request in the chain gets the status
1618 state->smb1.recv_status = status;
1619 } else {
1620 cmd = CVAL(cur[0].iov_base, 0);
1621 state->smb1.recv_status = NT_STATUS_OK;
1624 state->inbuf = inbuf;
1627 * Note: here we use talloc_reference() in a way
1628 * that does not expose it to the caller.
1630 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1631 if (tevent_req_nomem(inbuf_ref, req)) {
1632 continue;
1635 /* copy the related buffers */
1636 state->smb1.recv_iov[0] = iov[0];
1637 state->smb1.recv_iov[1] = cur[0];
1638 state->smb1.recv_iov[2] = cur[1];
1640 tevent_req_done(req);
1643 return NT_STATUS_RETRY;
1646 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1647 TALLOC_CTX *mem_ctx,
1648 struct iovec **piov,
1649 uint8_t **phdr,
1650 uint8_t *pwct,
1651 uint16_t **pvwv,
1652 uint32_t *pvwv_offset,
1653 uint32_t *pnum_bytes,
1654 uint8_t **pbytes,
1655 uint32_t *pbytes_offset,
1656 uint8_t **pinbuf,
1657 const struct smb1cli_req_expected_response *expected,
1658 size_t num_expected)
1660 struct smbXcli_req_state *state =
1661 tevent_req_data(req,
1662 struct smbXcli_req_state);
1663 NTSTATUS status = NT_STATUS_OK;
1664 struct iovec *recv_iov = NULL;
1665 uint8_t *hdr = NULL;
1666 uint8_t wct = 0;
1667 uint32_t vwv_offset = 0;
1668 uint16_t *vwv = NULL;
1669 uint32_t num_bytes = 0;
1670 uint32_t bytes_offset = 0;
1671 uint8_t *bytes = NULL;
1672 size_t i;
1673 bool found_status = false;
1674 bool found_size = false;
1676 if (piov != NULL) {
1677 *piov = NULL;
1679 if (phdr != NULL) {
1680 *phdr = 0;
1682 if (pwct != NULL) {
1683 *pwct = 0;
1685 if (pvwv != NULL) {
1686 *pvwv = NULL;
1688 if (pvwv_offset != NULL) {
1689 *pvwv_offset = 0;
1691 if (pnum_bytes != NULL) {
1692 *pnum_bytes = 0;
1694 if (pbytes != NULL) {
1695 *pbytes = NULL;
1697 if (pbytes_offset != NULL) {
1698 *pbytes_offset = 0;
1700 if (pinbuf != NULL) {
1701 *pinbuf = NULL;
1704 if (state->inbuf != NULL) {
1705 recv_iov = state->smb1.recv_iov;
1706 hdr = (uint8_t *)recv_iov[0].iov_base;
1707 wct = recv_iov[1].iov_len/2;
1708 vwv = (uint16_t *)recv_iov[1].iov_base;
1709 vwv_offset = PTR_DIFF(vwv, hdr);
1710 num_bytes = recv_iov[2].iov_len;
1711 bytes = (uint8_t *)recv_iov[2].iov_base;
1712 bytes_offset = PTR_DIFF(bytes, hdr);
1715 if (tevent_req_is_nterror(req, &status)) {
1716 for (i=0; i < num_expected; i++) {
1717 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1718 found_status = true;
1719 break;
1723 if (found_status) {
1724 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
1727 return status;
1730 if (num_expected == 0) {
1731 found_status = true;
1732 found_size = true;
1735 status = state->smb1.recv_status;
1737 for (i=0; i < num_expected; i++) {
1738 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1739 continue;
1742 found_status = true;
1743 if (expected[i].wct == 0) {
1744 found_size = true;
1745 break;
1748 if (expected[i].wct == wct) {
1749 found_size = true;
1750 break;
1754 if (!found_status) {
1755 return status;
1758 if (!found_size) {
1759 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1762 if (piov != NULL) {
1763 *piov = talloc_move(mem_ctx, &recv_iov);
1766 if (phdr != NULL) {
1767 *phdr = hdr;
1769 if (pwct != NULL) {
1770 *pwct = wct;
1772 if (pvwv != NULL) {
1773 *pvwv = vwv;
1775 if (pvwv_offset != NULL) {
1776 *pvwv_offset = vwv_offset;
1778 if (pnum_bytes != NULL) {
1779 *pnum_bytes = num_bytes;
1781 if (pbytes != NULL) {
1782 *pbytes = bytes;
1784 if (pbytes_offset != NULL) {
1785 *pbytes_offset = bytes_offset;
1787 if (pinbuf != NULL) {
1788 *pinbuf = state->inbuf;
1791 return status;
1794 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1796 size_t wct_ofs;
1797 int i;
1799 wct_ofs = HDR_WCT;
1801 for (i=0; i<num_reqs; i++) {
1802 struct smbXcli_req_state *state;
1803 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1804 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1805 state->smb1.iov_count-2);
1806 wct_ofs = (wct_ofs + 3) & ~3;
1808 return wct_ofs;
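/*
 * Glue several prepared requests into one AndX chain: each AndX vwv block
 * gets the command and 4-byte aligned wct offset of its successor, padding
 * is inserted between the parts, and the whole chain goes out as a single
 * SMB with the mid and signature of the first request.
 */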
1811 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
1813 struct smbXcli_req_state *first_state =
1814 tevent_req_data(reqs[0],
1815 struct smbXcli_req_state);
1816 struct smbXcli_req_state *state;
1817 size_t wct_offset;
1818 size_t chain_padding = 0;
1819 int i, iovlen;
1820 struct iovec *iov = NULL;
1821 struct iovec *this_iov;
1822 NTSTATUS status;
1823 size_t nbt_len;
1825 if (num_reqs == 1) {
1826 return smb1cli_req_writev_submit(reqs[0], first_state,
1827 first_state->smb1.iov,
1828 first_state->smb1.iov_count);
1831 iovlen = 0;
1832 for (i=0; i<num_reqs; i++) {
1833 if (!tevent_req_is_in_progress(reqs[i])) {
1834 return NT_STATUS_INTERNAL_ERROR;
1837 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1839 if (state->smb1.iov_count < 4) {
1840 return NT_STATUS_INVALID_PARAMETER_MIX;
1843 if (i == 0) {
1845 * The NBT and SMB header
1847 iovlen += 2;
1848 } else {
1850 * Chain padding
1852 iovlen += 1;
1856 * words and bytes
1858 iovlen += state->smb1.iov_count - 2;
1861 iov = talloc_zero_array(first_state, struct iovec, iovlen);
1862 if (iov == NULL) {
1863 return NT_STATUS_NO_MEMORY;
1866 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
1867 first_state, reqs, sizeof(*reqs) * num_reqs);
1868 if (first_state->smb1.chained_requests == NULL) {
1869 TALLOC_FREE(iov);
1870 return NT_STATUS_NO_MEMORY;
1873 wct_offset = HDR_WCT;
1874 this_iov = iov;
1876 for (i=0; i<num_reqs; i++) {
1877 size_t next_padding = 0;
1878 uint16_t *vwv;
1880 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1882 if (i < num_reqs-1) {
1883 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
1884 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
1885 TALLOC_FREE(iov);
1886 TALLOC_FREE(first_state->smb1.chained_requests);
1887 return NT_STATUS_INVALID_PARAMETER_MIX;
1891 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
1892 state->smb1.iov_count-2) + 1;
1893 if ((wct_offset % 4) != 0) {
1894 next_padding = 4 - (wct_offset % 4);
1896 wct_offset += next_padding;
1897 vwv = state->smb1.vwv;
1899 if (i < num_reqs-1) {
1900 struct smbXcli_req_state *next_state =
1901 tevent_req_data(reqs[i+1],
1902 struct smbXcli_req_state);
1903 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
1904 SCVAL(vwv+0, 1, 0);
1905 SSVAL(vwv+1, 0, wct_offset);
1906 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
1907 /* properly end the chain */
1908 SCVAL(vwv+0, 0, 0xff);
1909 SCVAL(vwv+0, 1, 0xff);
1910 SSVAL(vwv+1, 0, 0);
1913 if (i == 0) {
1915 * The NBT and SMB header
1917 this_iov[0] = state->smb1.iov[0];
1918 this_iov[1] = state->smb1.iov[1];
1919 this_iov += 2;
1920 } else {
1922 * This one is a bit subtle. We have to add
1923 * chain_padding bytes between the requests, and we
1924 * have to also include the wct field of the
1925 * subsequent requests. We use the subsequent header
1926 * for the padding, it contains the wct field in its
1927 * last byte.
1929 this_iov[0].iov_len = chain_padding+1;
1930 this_iov[0].iov_base = (void *)&state->smb1.hdr[
1931 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
1932 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
1933 this_iov += 1;
1937 * copy the words and bytes
1939 memcpy(this_iov, state->smb1.iov+2,
1940 sizeof(struct iovec) * (state->smb1.iov_count-2));
1941 this_iov += state->smb1.iov_count - 2;
1942 chain_padding = next_padding;
1945 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
1946 if (nbt_len > first_state->conn->smb1.max_xmit) {
1947 TALLOC_FREE(iov);
1948 TALLOC_FREE(first_state->smb1.chained_requests);
1949 return NT_STATUS_INVALID_PARAMETER_MIX;
1952 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
1953 if (!NT_STATUS_IS_OK(status)) {
1954 TALLOC_FREE(iov);
1955 TALLOC_FREE(first_state->smb1.chained_requests);
1956 return status;
1959 return NT_STATUS_OK;
1962 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1964 return ((tevent_queue_length(conn->outgoing) != 0)
1965 || (talloc_array_length(conn->pending) != 0));
1968 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
1970 return conn->smb2.server.capabilities;
1973 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
1975 return conn->smb2.server.security_mode;
1978 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
1980 return conn->smb2.server.max_trans_size;
1983 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
1985 return conn->smb2.server.max_read_size;
1988 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
1990 return conn->smb2.server.max_write_size;
1993 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
1994 uint16_t max_credits)
1996 conn->smb2.max_credits = max_credits;
1999 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2000 struct tevent_context *ev,
2001 struct smbXcli_conn *conn,
2002 uint16_t cmd,
2003 uint32_t additional_flags,
2004 uint32_t clear_flags,
2005 uint32_t timeout_msec,
2006 uint32_t pid,
2007 uint32_t tid,
2008 uint64_t uid,
2009 const uint8_t *fixed,
2010 uint16_t fixed_len,
2011 const uint8_t *dyn,
2012 uint32_t dyn_len)
2014 struct tevent_req *req;
2015 struct smbXcli_req_state *state;
2016 uint32_t flags = 0;
2018 req = tevent_req_create(mem_ctx, &state,
2019 struct smbXcli_req_state);
2020 if (req == NULL) {
2021 return NULL;
2024 state->ev = ev;
2025 state->conn = conn;
2027 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2028 if (state->smb2.recv_iov == NULL) {
2029 TALLOC_FREE(req);
2030 return NULL;
2033 flags |= additional_flags;
2034 flags &= ~clear_flags;
2036 state->smb2.fixed = fixed;
2037 state->smb2.fixed_len = fixed_len;
2038 state->smb2.dyn = dyn;
2039 state->smb2.dyn_len = dyn_len;
2041 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2042 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2043 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2044 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2045 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2046 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2047 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
2049 switch (cmd) {
2050 case SMB2_OP_CANCEL:
2051 state->one_way = true;
2052 break;
2053 case SMB2_OP_BREAK:
2055 * If this is a dummy request, it will have
2056 * UINT64_MAX as message id.
2057  * If we send an oplock break acknowledgement,
2058 * this gets overwritten later.
2060 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2061 break;
2064 if (timeout_msec > 0) {
2065 struct timeval endtime;
2067 endtime = timeval_current_ofs_msec(timeout_msec);
2068 if (!tevent_req_set_endtime(req, ev, endtime)) {
2069 return req;
2073 return req;
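/*
 * Illustrative sketch (not part of this file): a compound SMB2 request is
 * built by creating the individual requests and submitting them together,
 * e.g. roughly:
 *
 *	reqs[0] = smb2cli_req_create(mem_ctx, ev, conn, SMB2_OP_CREATE, ...);
 *	reqs[1] = smb2cli_req_create(mem_ctx, ev, conn, SMB2_OP_CLOSE, ...);
 *	status  = smb2cli_req_compound_submit(reqs, 2);
 *
 * smb2cli_req_compound_submit() fills in the message ids, credits and the
 * SMB2_HDR_NEXT_COMMAND offsets and sends everything in one TCP PDU.
 */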
2076 static void smb2cli_writev_done(struct tevent_req *subreq);
2077 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2078 TALLOC_CTX *tmp_mem,
2079 uint8_t *inbuf);
2081 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2082 int num_reqs)
2084 struct smbXcli_req_state *state;
2085 struct tevent_req *subreq;
2086 struct iovec *iov;
2087 int i, num_iov, nbt_len;
2090 * 1 for the nbt length
2091 * per request: HDR, fixed, dyn, padding
2092 * -1 because the last one does not need padding
2095 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2096 if (iov == NULL) {
2097 return NT_STATUS_NO_MEMORY;
2100 num_iov = 1;
2101 nbt_len = 0;
2103 for (i=0; i<num_reqs; i++) {
2104 size_t reqlen;
2105 bool ret;
2106 uint64_t avail;
2107 uint16_t charge;
2108 uint16_t credits;
2109 uint64_t mid;
2111 if (!tevent_req_is_in_progress(reqs[i])) {
2112 return NT_STATUS_INTERNAL_ERROR;
2115 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2117 if (!smbXcli_conn_is_connected(state->conn)) {
2118 return NT_STATUS_CONNECTION_DISCONNECTED;
2121 if ((state->conn->protocol != PROTOCOL_NONE) &&
2122 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2123 return NT_STATUS_REVISION_MISMATCH;
2126 avail = UINT64_MAX - state->conn->smb2.mid;
2127 if (avail < 1) {
2128 return NT_STATUS_CONNECTION_ABORTED;
2131 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2132 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2133 } else {
2134 charge = 1;
2137 charge = MAX(state->smb2.credit_charge, charge);
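/*
 * With SMB2_CAP_LARGE_MTU the charge is one credit per 64KiB (or part
 * thereof) of dynamic payload: e.g. a 70000-byte body gives
 * (70000 - 1) / 65536 + 1 = 2, while anything up to 65536 bytes costs a
 * single credit. An explicit smb2cli_req_set_credit_charge() can only
 * raise this.
 */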
2139 avail = MIN(avail, state->conn->smb2.cur_credits);
2140 if (avail < charge) {
2141 return NT_STATUS_INTERNAL_ERROR;
2144 credits = 0;
2145 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2146 credits = state->conn->smb2.max_credits -
2147 state->conn->smb2.cur_credits;
2149 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
2150 credits += 1;
2153 mid = state->conn->smb2.mid;
2154 state->conn->smb2.mid += charge;
2155 state->conn->smb2.cur_credits -= charge;
2157 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2158 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2160 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2161 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2163 iov[num_iov].iov_base = state->smb2.hdr;
2164 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2165 num_iov += 1;
2167 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2168 iov[num_iov].iov_len = state->smb2.fixed_len;
2169 num_iov += 1;
2171 if (state->smb2.dyn != NULL) {
2172 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2173 iov[num_iov].iov_len = state->smb2.dyn_len;
2174 num_iov += 1;
2177 reqlen = sizeof(state->smb2.hdr);
2178 reqlen += state->smb2.fixed_len;
2179 reqlen += state->smb2.dyn_len;
2181 if (i < num_reqs-1) {
2182 if ((reqlen % 8) > 0) {
2183 uint8_t pad = 8 - (reqlen % 8);
2184 iov[num_iov].iov_base = state->smb2.pad;
2185 iov[num_iov].iov_len = pad;
2186 num_iov += 1;
2187 reqlen += pad;
2189 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2191 nbt_len += reqlen;
2193 ret = smbXcli_req_set_pending(reqs[i]);
2194 if (!ret) {
2195 return NT_STATUS_NO_MEMORY;
2200 * TODO: Do signing here
2203 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2204 _smb_setlen_tcp(state->length_hdr, nbt_len);
2205 iov[0].iov_base = state->length_hdr;
2206 iov[0].iov_len = sizeof(state->length_hdr);
2208 if (state->conn->dispatch_incoming == NULL) {
2209 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2212 subreq = writev_send(state, state->ev, state->conn->outgoing,
2213 state->conn->fd, false, iov, num_iov);
2214 if (subreq == NULL) {
2215 return NT_STATUS_NO_MEMORY;
2217 tevent_req_set_callback(subreq, smb2cli_writev_done, reqs[0]);
2218 return NT_STATUS_OK;
2221 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2223 struct smbXcli_req_state *state =
2224 tevent_req_data(req,
2225 struct smbXcli_req_state);
2227 state->smb2.credit_charge = charge;
2230 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2231 struct tevent_context *ev,
2232 struct smbXcli_conn *conn,
2233 uint16_t cmd,
2234 uint32_t additional_flags,
2235 uint32_t clear_flags,
2236 uint32_t timeout_msec,
2237 uint32_t pid,
2238 uint32_t tid,
2239 uint64_t uid,
2240 const uint8_t *fixed,
2241 uint16_t fixed_len,
2242 const uint8_t *dyn,
2243 uint32_t dyn_len)
2245 struct tevent_req *req;
2246 NTSTATUS status;
2248 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2249 additional_flags, clear_flags,
2250 timeout_msec,
2251 pid, tid, uid,
2252 fixed, fixed_len, dyn, dyn_len);
2253 if (req == NULL) {
2254 return NULL;
2256 if (!tevent_req_is_in_progress(req)) {
2257 return tevent_req_post(req, ev);
2259 status = smb2cli_req_compound_submit(&req, 1);
2260 if (tevent_req_nterror(req, status)) {
2261 return tevent_req_post(req, ev);
2263 return req;
2266 static void smb2cli_writev_done(struct tevent_req *subreq)
2268 struct tevent_req *req =
2269 tevent_req_callback_data(subreq,
2270 struct tevent_req);
2271 struct smbXcli_req_state *state =
2272 tevent_req_data(req,
2273 struct smbXcli_req_state);
2274 ssize_t nwritten;
2275 int err;
2277 nwritten = writev_recv(subreq, &err);
2278 TALLOC_FREE(subreq);
2279 if (nwritten == -1) {
2280 /* here, we need to notify all pending requests */
2281 NTSTATUS status = map_nt_error_from_unix_common(err);
2282 smbXcli_conn_disconnect(state->conn, status);
2283 return;
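/*
 * Split a received SMB2 TCP PDU into iovecs: header, fixed body and
 * dynamic body for each contained response, following
 * SMB2_HDR_NEXT_COMMAND through a compound reply.
 */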
2287 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2288 struct iovec **piov, int *pnum_iov)
2290 struct iovec *iov;
2291 int num_iov;
2292 size_t buflen;
2293 size_t taken;
2294 uint8_t *first_hdr;
2296 num_iov = 0;
2298 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2299 if (iov == NULL) {
2300 return NT_STATUS_NO_MEMORY;
2303 buflen = smb_len_tcp(buf);
2304 taken = 0;
2305 first_hdr = buf + NBT_HDR_SIZE;
2307 while (taken < buflen) {
2308 size_t len = buflen - taken;
2309 uint8_t *hdr = first_hdr + taken;
2310 struct iovec *cur;
2311 size_t full_size;
2312 size_t next_command_ofs;
2313 uint16_t body_size;
2314 struct iovec *iov_tmp;
2317 * We need the header plus the body length field
2320 if (len < SMB2_HDR_BODY + 2) {
2321 DEBUG(10, ("%d bytes left, expected at least %d\n",
2322 (int)len, SMB2_HDR_BODY));
2323 goto inval;
2325 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2326 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2327 IVAL(hdr, 0)));
2328 goto inval;
2330 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2331 DEBUG(10, ("Got HDR len %d, expected %d\n",
2332 SVAL(hdr, 4), SMB2_HDR_BODY));
2333 goto inval;
2336 full_size = len;
2337 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2338 body_size = SVAL(hdr, SMB2_HDR_BODY);
2340 if (next_command_ofs != 0) {
2341 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2342 goto inval;
2344 if (next_command_ofs > full_size) {
2345 goto inval;
2347 full_size = next_command_ofs;
2349 if (body_size < 2) {
2350 goto inval;
2352 body_size &= 0xfffe;
2354 if (body_size > (full_size - SMB2_HDR_BODY)) {
2355 goto inval;
2358 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2359 num_iov + 3);
2360 if (iov_tmp == NULL) {
2361 TALLOC_FREE(iov);
2362 return NT_STATUS_NO_MEMORY;
2364 iov = iov_tmp;
2365 cur = &iov[num_iov];
2366 num_iov += 3;
2368 cur[0].iov_base = hdr;
2369 cur[0].iov_len = SMB2_HDR_BODY;
2370 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2371 cur[1].iov_len = body_size;
2372 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2373 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2375 taken += full_size;
2378 *piov = iov;
2379 *pnum_iov = num_iov;
2380 return NT_STATUS_OK;
2382 inval:
2383 TALLOC_FREE(iov);
2384 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2387 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2388 uint64_t mid)
2390 size_t num_pending = talloc_array_length(conn->pending);
2391 size_t i;
2393 for (i=0; i<num_pending; i++) {
2394 struct tevent_req *req = conn->pending[i];
2395 struct smbXcli_req_state *state =
2396 tevent_req_data(req,
2397 struct smbXcli_req_state);
2399 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
2400 return req;
2403 return NULL;
2406 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2407 TALLOC_CTX *tmp_mem,
2408 uint8_t *inbuf)
2410 struct tevent_req *req;
2411 struct smbXcli_req_state *state = NULL;
2412 struct iovec *iov;
2413 int i, num_iov;
2414 NTSTATUS status;
2415 bool defer = true;
2417 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2418 &iov, &num_iov);
2419 if (!NT_STATUS_IS_OK(status)) {
2420 return status;
2423 for (i=0; i<num_iov; i+=3) {
2424 uint8_t *inbuf_ref = NULL;
2425 struct iovec *cur = &iov[i];
2426 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2427 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2428 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2429 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2430 uint16_t req_opcode;
2431 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2432 uint32_t new_credits;
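/*
 * Each response grants additional credits. Keep the running
 * credit window in conn->smb2.cur_credits and treat a grant
 * that would push the window past UINT16_MAX as a protocol
 * violation, as credits are 16-bit quantities on the wire.
 */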
2434 new_credits = conn->smb2.cur_credits;
2435 new_credits += credits;
2436 if (new_credits > UINT16_MAX) {
2437 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2439 conn->smb2.cur_credits += credits;
2441 req = smb2cli_conn_find_pending(conn, mid);
2442 if (req == NULL) {
2443 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2445 state = tevent_req_data(req, struct smbXcli_req_state);
2447 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2448 if (opcode != req_opcode) {
2449 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2452 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2453 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2456 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
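/*
 * An interim response (STATUS_PENDING with the ASYNC flag set)
 * only carries the async id that the final response will use.
 * Record it in the stored request header and keep the request
 * on the pending list.
 */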
2457 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2458 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2459 uint32_t req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2460 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2462 req_flags |= SMB2_HDR_FLAG_ASYNC;
2463 SBVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2464 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2465 continue;
2468 smbXcli_req_unset_pending(req);
2471 * There might be more than one response,
2472 * so we need to defer the notifications.
2474 if ((num_iov == 3) && (talloc_array_length(conn->pending) == 0)) {
2475 defer = false;
2478 if (defer) {
2479 tevent_req_defer_callback(req, state->ev);
2483 * Note: here we use talloc_reference() in a way
2484 * that does not expose it to the caller.
2486 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2487 if (tevent_req_nomem(inbuf_ref, req)) {
2488 continue;
2491 /* copy the related buffers */
2492 state->smb2.recv_iov[0] = cur[0];
2493 state->smb2.recv_iov[1] = cur[1];
2494 state->smb2.recv_iov[2] = cur[2];
2496 tevent_req_done(req);
2499 if (defer) {
2500 return NT_STATUS_RETRY;
2503 return NT_STATUS_OK;
2506 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2507 struct iovec **piov,
2508 const struct smb2cli_req_expected_response *expected,
2509 size_t num_expected)
2511 struct smbXcli_req_state *state =
2512 tevent_req_data(req,
2513 struct smbXcli_req_state);
2514 NTSTATUS status;
2515 size_t body_size;
2516 bool found_status = false;
2517 bool found_size = false;
2518 size_t i;
2520 if (piov != NULL) {
2521 *piov = NULL;
2524 if (tevent_req_is_nterror(req, &status)) {
2525 for (i=0; i < num_expected; i++) {
2526 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2527 found_status = true;
2528 break;
2532 if (found_status) {
2533 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2536 return status;
2539 if (num_expected == 0) {
2540 found_status = true;
2541 found_size = true;
2544 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2545 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2547 for (i=0; i < num_expected; i++) {
2548 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
2549 continue;
2552 found_status = true;
2553 if (expected[i].body_size == 0) {
2554 found_size = true;
2555 break;
2558 if (expected[i].body_size == body_size) {
2559 found_size = true;
2560 break;
2564 if (!found_status) {
2565 return status;
2568 if (!found_size) {
2569 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2572 if (piov != NULL) {
2573 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
2576 return status;
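/*
 * Typical caller pattern (sketch only; the body_size value and the
 * surrounding tevent variables are illustrative): the expected[]
 * table filters on (status, body_size) pairs and a body_size of 0
 * accepts any body length for that status.
 *
 *	static const struct smb2cli_req_expected_response expected[] = {
 *	{
 *		.status = NT_STATUS_OK,
 *		.body_size = 0x59
 *	}
 *	};
 *	struct iovec *iov;
 *	NTSTATUS status;
 *
 *	status = smb2cli_req_recv(subreq, state, &iov,
 *				  expected, ARRAY_SIZE(expected));
 *	TALLOC_FREE(subreq);
 *	if (tevent_req_nterror(req, status)) {
 *		return;
 *	}
 *
 * On success iov[0] is the SMB2 header, iov[1] the fixed body and
 * iov[2] the dynamic part.
 */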
2579 static const struct {
2580 enum protocol_types proto;
2581 const char *smb1_name;
2582 } smb1cli_prots[] = {
2583 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
2584 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
2585 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
2586 {PROTOCOL_LANMAN1, "LANMAN1.0"},
2587 {PROTOCOL_LANMAN2, "LM1.2X002"},
2588 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
2589 {PROTOCOL_LANMAN2, "LANMAN2.1"},
2590 {PROTOCOL_LANMAN2, "Samba"},
2591 {PROTOCOL_NT1, "NT LANMAN 1.0"},
2592 {PROTOCOL_NT1, "NT LM 0.12"},
2593 {PROTOCOL_SMB2_02, "SMB 2.002"},
2594 {PROTOCOL_SMB2_10, "SMB 2.???"},
2597 static const struct {
2598 enum protocol_types proto;
2599 uint16_t smb2_dialect;
2600 } smb2cli_prots[] = {
2601 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
2602 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
2603 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
2606 struct smbXcli_negprot_state {
2607 struct smbXcli_conn *conn;
2608 struct tevent_context *ev;
2609 uint32_t timeout_msec;
2610 enum protocol_types min_protocol;
2611 enum protocol_types max_protocol;
2613 struct {
2614 uint8_t fixed[36];
2615 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
2616 } smb2;
2619 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
2620 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
2621 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
2622 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
2623 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
2624 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
2625 TALLOC_CTX *frame,
2626 uint8_t *inbuf);
2628 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
2629 struct tevent_context *ev,
2630 struct smbXcli_conn *conn,
2631 uint32_t timeout_msec,
2632 enum protocol_types min_protocol,
2633 enum protocol_types max_protocol)
2635 struct tevent_req *req, *subreq;
2636 struct smbXcli_negprot_state *state;
2638 req = tevent_req_create(mem_ctx, &state,
2639 struct smbXcli_negprot_state);
2640 if (req == NULL) {
2641 return NULL;
2643 state->conn = conn;
2644 state->ev = ev;
2645 state->timeout_msec = timeout_msec;
2646 state->min_protocol = min_protocol;
2647 state->max_protocol = max_protocol;
2649 if (min_protocol == PROTOCOL_NONE) {
2650 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2651 return tevent_req_post(req, ev);
2654 if (max_protocol == PROTOCOL_NONE) {
2655 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2656 return tevent_req_post(req, ev);
2659 if (min_protocol > max_protocol) {
2660 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
2661 return tevent_req_post(req, ev);
2664 if ((min_protocol < PROTOCOL_SMB2_02) &&
2665 (max_protocol < PROTOCOL_SMB2_02)) {
2667 * SMB1 only...
2669 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
2671 subreq = smbXcli_negprot_smb1_subreq(state);
2672 if (tevent_req_nomem(subreq, req)) {
2673 return tevent_req_post(req, ev);
2675 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
2676 return req;
2679 if ((min_protocol >= PROTOCOL_SMB2_02) &&
2680 (max_protocol >= PROTOCOL_SMB2_02)) {
2682 * SMB2 only...
2684 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2686 subreq = smbXcli_negprot_smb2_subreq(state);
2687 if (tevent_req_nomem(subreq, req)) {
2688 return tevent_req_post(req, ev);
2690 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
2691 return req;
2695 * We send an SMB1 negprot with the SMB2 dialects
2696 * and expect an SMB1 or an SMB2 response.
2698 * smbXcli_negprot_dispatch_incoming() will fix the
2699 * callback to match the protocol of the response.
2701 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
2703 subreq = smbXcli_negprot_smb1_subreq(state);
2704 if (tevent_req_nomem(subreq, req)) {
2705 return tevent_req_post(req, ev);
2707 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
2708 return req;
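/*
 * Asynchronous usage (sketch; the callback name and the protocol
 * bounds are only an example):
 *
 *	subreq = smbXcli_negprot_send(state, ev, conn, timeout_msec,
 *				      PROTOCOL_CORE, PROTOCOL_SMB2_10);
 *	if (tevent_req_nomem(subreq, req)) {
 *		return tevent_req_post(req, ev);
 *	}
 *	tevent_req_set_callback(subreq, my_negprot_done, req);
 *
 * and in my_negprot_done():
 *
 *	status = smbXcli_negprot_recv(subreq);
 *	TALLOC_FREE(subreq);
 *	if (tevent_req_nterror(req, status)) {
 *		return;
 *	}
 */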
2711 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
2713 struct tevent_req *req =
2714 tevent_req_callback_data(subreq,
2715 struct tevent_req);
2716 NTSTATUS status;
2719 * we just want the low-level error
2721 status = tevent_req_simple_recv_ntstatus(subreq);
2722 TALLOC_FREE(subreq);
2723 if (tevent_req_nterror(req, status)) {
2724 return;
2727 /* this should never happen */
2728 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2731 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
2733 size_t i;
2734 DATA_BLOB bytes = data_blob_null;
2735 uint8_t flags;
2736 uint16_t flags2;
2738 /* setup the protocol strings */
2739 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2740 uint8_t c = 2;
2741 bool ok;
2743 if (smb1cli_prots[i].proto < state->min_protocol) {
2744 continue;
2747 if (smb1cli_prots[i].proto > state->max_protocol) {
2748 continue;
2751 ok = data_blob_append(state, &bytes, &c, sizeof(c));
2752 if (!ok) {
2753 return NULL;
2757 * We know it is already ASCII and
2758 * we want NULL termination.
2760 ok = data_blob_append(state, &bytes,
2761 smb1cli_prots[i].smb1_name,
2762 strlen(smb1cli_prots[i].smb1_name)+1);
2763 if (!ok) {
2764 return NULL;
2768 smb1cli_req_flags(state->max_protocol,
2769 state->conn->smb1.client.capabilities,
2770 SMBnegprot,
2771 0, 0, &flags,
2772 0, 0, &flags2);
2774 return smb1cli_req_send(state, state->ev, state->conn,
2775 SMBnegprot,
2776 flags, ~flags,
2777 flags2, ~flags2,
2778 state->timeout_msec,
2779 0xFFFE, 0, 0, /* pid, tid, uid */
2780 0, NULL, /* wct, vwv */
2781 bytes.length, bytes.data);
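/*
 * The data portion built above is a list of dialect entries, each a
 * BufferFormat byte 0x02 followed by the NUL terminated ASCII name.
 * For "NT LM 0.12" that is:
 *
 *	02 4e 54 20 4c 4d 20 30 2e 31 32 00
 */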
2784 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
2786 struct tevent_req *req =
2787 tevent_req_callback_data(subreq,
2788 struct tevent_req);
2789 struct smbXcli_negprot_state *state =
2790 tevent_req_data(req,
2791 struct smbXcli_negprot_state);
2792 struct smbXcli_conn *conn = state->conn;
2793 struct iovec *recv_iov = NULL;
2794 uint8_t *inhdr;
2795 uint8_t wct;
2796 uint16_t *vwv;
2797 uint32_t num_bytes;
2798 uint8_t *bytes;
2799 NTSTATUS status;
2800 uint16_t protnum;
2801 size_t i;
2802 size_t num_prots = 0;
2803 uint8_t flags;
2804 uint32_t client_capabilities = conn->smb1.client.capabilities;
2805 uint32_t both_capabilities;
2806 uint32_t server_capabilities = 0;
2807 uint32_t capabilities;
2808 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
2809 uint32_t server_max_xmit = 0;
2810 uint32_t max_xmit;
2811 uint32_t server_max_mux = 0;
2812 uint16_t server_security_mode = 0;
2813 uint32_t server_session_key = 0;
2814 bool server_readbraw = false;
2815 bool server_writebraw = false;
2816 bool server_lockread = false;
2817 bool server_writeunlock = false;
2818 struct GUID server_guid = GUID_zero();
2819 DATA_BLOB server_gss_blob = data_blob_null;
2820 uint8_t server_challenge[8];
2821 char *server_workgroup = NULL;
2822 char *server_name = NULL;
2823 int server_time_zone = 0;
2824 NTTIME server_system_time = 0;
2825 static const struct smb1cli_req_expected_response expected[] = {
2827 .status = NT_STATUS_OK,
2828 .wct = 0x11, /* NT1 */
2831 .status = NT_STATUS_OK,
2832 .wct = 0x0D, /* LM */
2835 .status = NT_STATUS_OK,
2836 .wct = 0x01, /* CORE */
2840 ZERO_STRUCT(server_challenge);
2842 status = smb1cli_req_recv(subreq, state,
2843 &recv_iov,
2844 &inhdr,
2845 &wct,
2846 &vwv,
2847 NULL, /* pvwv_offset */
2848 &num_bytes,
2849 &bytes,
2850 NULL, /* pbytes_offset */
2851 NULL, /* pinbuf */
2852 expected, ARRAY_SIZE(expected));
2853 TALLOC_FREE(subreq);
2854 if (tevent_req_nterror(req, status)) {
2855 return;
2858 flags = CVAL(inhdr, HDR_FLG);
2860 protnum = SVAL(vwv, 0);
2862 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
2863 if (smb1cli_prots[i].proto < state->min_protocol) {
2864 continue;
2867 if (smb1cli_prots[i].proto > state->max_protocol) {
2868 continue;
2871 if (protnum != num_prots) {
2872 num_prots++;
2873 continue;
2876 conn->protocol = smb1cli_prots[i].proto;
2877 break;
2880 if (conn->protocol == PROTOCOL_NONE) {
2881 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2882 return;
2885 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
2886 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
2887 "and the selected protocol level doesn't support it.\n"));
2888 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
2889 return;
2892 if (flags & FLAG_SUPPORT_LOCKREAD) {
2893 server_lockread = true;
2894 server_writeunlock = true;
2897 if (conn->protocol >= PROTOCOL_NT1) {
2898 const char *client_signing = NULL;
2899 bool server_mandatory = false;
2900 bool server_allowed = false;
2901 const char *server_signing = NULL;
2902 bool ok;
2903 uint8_t key_len;
2905 if (wct != 0x11) {
2906 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2907 return;
2910 /* NT protocol */
2911 server_security_mode = CVAL(vwv + 1, 0);
2912 server_max_mux = SVAL(vwv + 1, 1);
2913 server_max_xmit = IVAL(vwv + 3, 1);
2914 server_session_key = IVAL(vwv + 7, 1);
2915 server_time_zone = SVALS(vwv + 15, 1);
2916 server_time_zone *= 60;
2917 /* this time arrives in real GMT */
2918 server_system_time = BVAL(vwv + 11, 1);
2919 server_capabilities = IVAL(vwv + 9, 1);
2921 key_len = CVAL(vwv + 16, 1);
2923 if (server_capabilities & CAP_RAW_MODE) {
2924 server_readbraw = true;
2925 server_writebraw = true;
2927 if (server_capabilities & CAP_LOCK_AND_READ) {
2928 server_lockread = true;
2931 if (server_capabilities & CAP_EXTENDED_SECURITY) {
2932 DATA_BLOB blob1, blob2;
2934 if (num_bytes < 16) {
2935 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2936 return;
2939 blob1 = data_blob_const(bytes, 16);
2940 status = GUID_from_data_blob(&blob1, &server_guid);
2941 if (tevent_req_nterror(req, status)) {
2942 return;
2945 blob1 = data_blob_const(bytes+16, num_bytes-16);
2946 blob2 = data_blob_dup_talloc(state, blob1);
2947 if (blob1.length > 0 &&
2948 tevent_req_nomem(blob2.data, req)) {
2949 return;
2951 server_gss_blob = blob2;
2952 } else {
2953 DATA_BLOB blob1, blob2;
2955 if (num_bytes < key_len) {
2956 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2957 return;
2960 if (key_len != 0 && key_len != 8) {
2961 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
2962 return;
2965 if (key_len == 8) {
2966 memcpy(server_challenge, bytes, 8);
2969 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
2970 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
2971 if (blob1.length > 0) {
2972 size_t len;
2974 len = utf16_len_n(blob1.data,
2975 blob1.length);
2976 blob1.length = len;
2978 ok = convert_string_talloc(state,
2979 CH_UTF16LE,
2980 CH_UNIX,
2981 blob1.data,
2982 blob1.length,
2983 &server_workgroup,
2984 &len);
2985 if (!ok) {
2986 status = map_nt_error_from_unix_common(errno);
2987 tevent_req_nterror(req, status);
2988 return;
2992 blob2.data += blob1.length;
2993 blob2.length -= blob1.length;
2994 if (blob2.length > 0) {
2995 size_t len;
2997 len = utf16_len_n(blob2.data,
2998 blob2.length);
2999 blob2.length = len;
3001 ok = convert_string_talloc(state,
3002 CH_UTF16LE,
3003 CH_UNIX,
3004 blob2.data,
3005 blob2.length,
3006 &server_name,
3007 &len);
3008 if (!ok) {
3009 status = map_nt_error_from_unix_common(errno);
3010 tevent_req_nterror(req, status);
3011 return;
3016 client_signing = "disabled";
3017 if (conn->allow_signing) {
3018 client_signing = "allowed";
3020 if (conn->mandatory_signing) {
3021 client_signing = "required";
3024 server_signing = "not supported";
3025 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3026 server_signing = "supported";
3027 server_allowed = true;
3029 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3030 server_signing = "required";
3031 server_mandatory = true;
3034 ok = smb_signing_set_negotiated(conn->smb1.signing,
3035 server_allowed,
3036 server_mandatory);
3037 if (!ok) {
3038 DEBUG(1,("smbXcli_negprot: SMB signing is required, "
3039 "but client[%s] and server[%s] mismatch\n",
3040 client_signing, server_signing));
3041 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3042 return;
3045 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3046 DATA_BLOB blob1;
3047 uint8_t key_len;
3048 time_t t;
3050 if (wct != 0x0D) {
3051 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3052 return;
3055 server_security_mode = SVAL(vwv + 1, 0);
3056 server_max_xmit = SVAL(vwv + 2, 0);
3057 server_max_mux = SVAL(vwv + 3, 0);
3058 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3059 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3060 server_session_key = IVAL(vwv + 6, 0);
3061 server_time_zone = SVALS(vwv + 10, 0);
3062 server_time_zone *= 60;
3063 /* this time is converted to GMT by pull_dos_date() */
3064 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3065 unix_to_nt_time(&server_system_time, t);
3066 key_len = SVAL(vwv + 11, 0);
3068 if (num_bytes < key_len) {
3069 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3070 return;
3073 if (key_len != 0 && key_len != 8) {
3074 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3075 return;
3078 if (key_len == 8) {
3079 memcpy(server_challenge, bytes, 8);
3082 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3083 if (blob1.length > 0) {
3084 size_t len;
3085 bool ok;
3087 len = utf16_len_n(blob1.data,
3088 blob1.length);
3089 blob1.length = len;
3091 ok = convert_string_talloc(state,
3092 CH_DOS,
3093 CH_UNIX,
3094 blob1.data,
3095 blob1.length,
3096 &server_workgroup,
3097 &len);
3098 if (!ok) {
3099 status = map_nt_error_from_unix_common(errno);
3100 tevent_req_nterror(req, status);
3101 return;
3105 } else {
3106 /* the old core protocol */
3107 server_time_zone = get_time_zone(time(NULL));
3108 server_max_xmit = 1024;
3109 server_max_mux = 1;
3112 if (server_max_xmit < 1024) {
3113 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3114 return;
3117 if (server_max_mux < 1) {
3118 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3119 return;
3123 * Now calculate the negotiated capabilities
3124 * based on the mask for:
3125 * - client-only flags
3126 * - flags used in both directions
3127 * - server-only flags
3129 both_capabilities = client_capabilities & server_capabilities;
3130 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3131 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3132 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3134 max_xmit = MIN(client_max_xmit, server_max_xmit);
3136 conn->smb1.server.capabilities = server_capabilities;
3137 conn->smb1.capabilities = capabilities;
3139 conn->smb1.server.max_xmit = server_max_xmit;
3140 conn->smb1.max_xmit = max_xmit;
3142 conn->smb1.server.max_mux = server_max_mux;
3144 conn->smb1.server.security_mode = server_security_mode;
3146 conn->smb1.server.readbraw = server_readbraw;
3147 conn->smb1.server.writebraw = server_writebraw;
3148 conn->smb1.server.lockread = server_lockread;
3149 conn->smb1.server.writeunlock = server_writeunlock;
3151 conn->smb1.server.session_key = server_session_key;
3153 talloc_steal(conn, server_gss_blob.data);
3154 conn->smb1.server.gss_blob = server_gss_blob;
3155 conn->smb1.server.guid = server_guid;
3156 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3157 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3158 conn->smb1.server.name = talloc_move(conn, &server_name);
3160 conn->smb1.server.time_zone = server_time_zone;
3161 conn->smb1.server.system_time = server_system_time;
3163 tevent_req_done(req);
3166 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3168 size_t i;
3169 uint8_t *buf;
3170 uint16_t dialect_count = 0;
3172 buf = state->smb2.dyn;
3173 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3174 if (smb2cli_prots[i].proto < state->min_protocol) {
3175 continue;
3178 if (smb2cli_prots[i].proto > state->max_protocol) {
3179 continue;
3182 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
3183 dialect_count++;
3186 buf = state->smb2.fixed;
3187 SSVAL(buf, 0, 36);
3188 SSVAL(buf, 2, dialect_count);
3189 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3190 SSVAL(buf, 6, 0); /* Reserved */
3191 SSVAL(buf, 8, 0); /* Capabilities */
3192 if (state->max_protocol >= PROTOCOL_SMB2_10) {
3193 NTSTATUS status;
3194 DATA_BLOB blob;
3196 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3197 state, &blob);
3198 if (!NT_STATUS_IS_OK(status)) {
3199 return NULL;
3201 memcpy(buf+12, blob.data, 16); /* ClientGuid */
3202 } else {
3203 memset(buf+12, 0, 16); /* ClientGuid */
3205 SBVAL(buf, 28, 0); /* ClientStartTime */
3207 return smb2cli_req_send(state, state->ev,
3208 state->conn, SMB2_OP_NEGPROT,
3209 0, 0, /* flags */
3210 state->timeout_msec,
3211 0xFEFF, 0, 0, /* pid, tid, uid */
3212 state->smb2.fixed, sizeof(state->smb2.fixed),
3213 state->smb2.dyn, dialect_count*2);
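/*
 * Layout of the 36 byte fixed part built above (offsets as in the
 * SMB2 NEGOTIATE request):
 *
 *	0	StructureSize (36)
 *	2	DialectCount
 *	4	SecurityMode
 *	6	Reserved
 *	8	Capabilities
 *	12	ClientGuid (16 bytes)
 *	28	ClientStartTime (8 bytes)
 *
 * The dialect array is carried in the dynamic part, two bytes per
 * offered dialect.
 */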
3216 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3218 struct tevent_req *req =
3219 tevent_req_callback_data(subreq,
3220 struct tevent_req);
3221 struct smbXcli_negprot_state *state =
3222 tevent_req_data(req,
3223 struct smbXcli_negprot_state);
3224 struct smbXcli_conn *conn = state->conn;
3225 size_t security_offset, security_length;
3226 DATA_BLOB blob;
3227 NTSTATUS status;
3228 struct iovec *iov;
3229 uint8_t *body;
3230 size_t i;
3231 uint16_t dialect_revision;
3232 static const struct smb2cli_req_expected_response expected[] = {
3234 .status = NT_STATUS_OK,
3235 .body_size = 0x41
3239 status = smb2cli_req_recv(subreq, state, &iov,
3240 expected, ARRAY_SIZE(expected));
3241 TALLOC_FREE(subreq);
3242 if (tevent_req_nterror(req, status)) {
3243 return;
3246 body = (uint8_t *)iov[1].iov_base;
3248 dialect_revision = SVAL(body, 4);
3250 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3251 if (smb2cli_prots[i].proto < state->min_protocol) {
3252 continue;
3255 if (smb2cli_prots[i].proto > state->max_protocol) {
3256 continue;
3259 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3260 continue;
3263 conn->protocol = smb2cli_prots[i].proto;
3264 break;
3267 if (conn->protocol == PROTOCOL_NONE) {
3268 if (state->min_protocol >= PROTOCOL_SMB2_02) {
3269 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3270 return;
3273 if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3274 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3275 return;
3278 /* make sure we do not loop forever */
3279 state->min_protocol = PROTOCOL_SMB2_02;
3282 * send an SMB2 negprot in order to negotiate
3283 * the SMB2 dialect. This needs to use
3284 * message id 1.
3286 state->conn->smb2.mid = 1;
3287 subreq = smbXcli_negprot_smb2_subreq(state);
3288 if (tevent_req_nomem(subreq, req)) {
3289 return;
3291 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3292 return;
3295 conn->smb2.server.security_mode = SVAL(body, 2);
3297 blob = data_blob_const(body + 8, 16);
3298 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3299 if (tevent_req_nterror(req, status)) {
3300 return;
3303 conn->smb2.server.capabilities = IVAL(body, 24);
3304 conn->smb2.server.max_trans_size= IVAL(body, 28);
3305 conn->smb2.server.max_read_size = IVAL(body, 32);
3306 conn->smb2.server.max_write_size= IVAL(body, 36);
3307 conn->smb2.server.system_time = BVAL(body, 40);
3308 conn->smb2.server.start_time = BVAL(body, 48);
3310 security_offset = SVAL(body, 56);
3311 security_length = SVAL(body, 58);
3313 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3314 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3315 return;
3318 if (security_length > iov[2].iov_len) {
3319 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3320 return;
3323 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3324 iov[2].iov_base,
3325 security_length);
3326 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3327 return;
3330 tevent_req_done(req);
3333 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3334 TALLOC_CTX *tmp_mem,
3335 uint8_t *inbuf)
3337 size_t num_pending = talloc_array_length(conn->pending);
3338 struct tevent_req *subreq;
3339 struct smbXcli_req_state *substate;
3340 struct tevent_req *req;
3341 struct smbXcli_negprot_state *state;
3342 uint32_t protocol_magic = IVAL(inbuf, 4);
3344 if (num_pending != 1) {
3345 return NT_STATUS_INTERNAL_ERROR;
3348 subreq = conn->pending[0];
3349 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3350 req = tevent_req_callback_data(subreq, struct tevent_req);
3351 state = tevent_req_data(req, struct smbXcli_negprot_state);
3353 switch (protocol_magic) {
3354 case SMB_MAGIC:
3355 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3356 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3357 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3359 case SMB2_MAGIC:
3360 if (substate->smb2.recv_iov == NULL) {
3362 * For the SMB1 negprot we have to move it.
3364 substate->smb2.recv_iov = substate->smb1.recv_iov;
3365 substate->smb1.recv_iov = NULL;
3368 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3369 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3370 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3373 DEBUG(10, ("Got non-SMB PDU\n"));
3374 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3377 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3379 return tevent_req_simple_recv_ntstatus(req);
3382 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3383 uint32_t timeout_msec,
3384 enum protocol_types min_protocol,
3385 enum protocol_types max_protocol)
3387 TALLOC_CTX *frame = talloc_stackframe();
3388 struct tevent_context *ev;
3389 struct tevent_req *req;
3390 NTSTATUS status = NT_STATUS_NO_MEMORY;
3391 bool ok;
3393 if (smbXcli_conn_has_async_calls(conn)) {
3395 * Can't use sync call while an async call is in flight
3397 status = NT_STATUS_INVALID_PARAMETER_MIX;
3398 goto fail;
3400 ev = tevent_context_init(frame);
3401 if (ev == NULL) {
3402 goto fail;
3404 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3405 min_protocol, max_protocol);
3406 if (req == NULL) {
3407 goto fail;
3409 ok = tevent_req_poll(req, ev);
3410 if (!ok) {
3411 status = map_nt_error_from_unix_common(errno);
3412 goto fail;
3414 status = smbXcli_negprot_recv(req);
3415 fail:
3416 TALLOC_FREE(frame);
3417 return status;
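/*
 * Synchronous usage (sketch; the protocol bounds are only an example
 * and "conn" stands for an already established struct smbXcli_conn):
 *
 *	status = smbXcli_negprot(conn, timeout_msec,
 *				 PROTOCOL_CORE, PROTOCOL_SMB2_10);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */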
3420 static int smbXcli_session_destructor(struct smbXcli_session *session)
3422 if (session->conn == NULL) {
3423 return 0;
3426 DLIST_REMOVE(session->conn->sessions, session);
3427 return 0;
3430 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
3431 struct smbXcli_conn *conn)
3433 struct smbXcli_session *session;
3435 session = talloc_zero(mem_ctx, struct smbXcli_session);
3436 if (session == NULL) {
3437 return NULL;
3439 talloc_set_destructor(session, smbXcli_session_destructor);
3441 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
3442 session->conn = conn;
3444 return session;
3447 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
3449 struct smbXcli_conn *conn = session->conn;
3450 uint8_t security_mode = 0;
3452 if (conn == NULL) {
3453 return security_mode;
3456 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
3457 if (conn->mandatory_signing) {
3458 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
3461 return security_mode;
3464 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
3466 return session->smb2.session_id;
3469 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
3470 uint64_t session_id,
3471 uint16_t session_flags)
3473 session->smb2.session_id = session_id;
3474 session->smb2.session_flags = session_flags;
3477 NTSTATUS smb2cli_session_update_session_key(struct smbXcli_session *session,
3478 const DATA_BLOB session_key,
3479 const struct iovec *recv_iov)
3481 struct smbXcli_conn *conn = session->conn;
3482 uint16_t no_sign_flags;
3483 DATA_BLOB signing_key;
3484 NTSTATUS status;
3486 if (conn == NULL) {
3487 return NT_STATUS_INVALID_PARAMETER_MIX;
3490 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
3492 if (session->smb2.session_flags & no_sign_flags) {
3493 session->smb2.should_sign = false;
3494 return NT_STATUS_OK;
3497 if (session->smb2.signing_key.length > 0) {
3498 signing_key = session->smb2.signing_key;
3499 } else {
3500 signing_key = session_key;
3503 status = smb2_signing_check_pdu(signing_key, recv_iov, 3);
3504 if (!NT_STATUS_IS_OK(status)) {
3505 return status;
3508 session->smb2.session_key = data_blob_dup_talloc(session, session_key);
3509 if (session->smb2.session_key.data == NULL) {
3510 return NT_STATUS_NO_MEMORY;
3513 if (session->smb2.signing_key.length > 0) {
3514 return NT_STATUS_OK;
3517 session->smb2.signing_key = data_blob_dup_talloc(session, signing_key);
3518 if (session->smb2.signing_key.data == NULL) {
3519 return NT_STATUS_NO_MEMORY;
3522 session->smb2.should_sign = false;
3524 if (conn->desire_signing) {
3525 session->smb2.should_sign = true;
3528 if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
3529 session->smb2.should_sign = true;
3532 return NT_STATUS_OK;
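/*
 * Intended calling sequence during SMB2 session setup (sketch; where
 * session_id, session_flags, session_key and the signed final session
 * setup response iovec come from is up to the caller):
 *
 *	session = smbXcli_session_create(mem_ctx, conn);
 *	if (session == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	...
 *	smb2cli_session_set_id_and_flags(session, session_id,
 *					 session_flags);
 *	status = smb2cli_session_update_session_key(session, session_key,
 *						    recv_iov);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */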