libcli/smb/smbXcli_base.c
1 /*
2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "includes.h"
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "lib/util/dlinklist.h"
28 #include "../libcli/smb/smb_common.h"
29 #include "../libcli/smb/smb_seal.h"
30 #include "../libcli/smb/smb_signing.h"
31 #include "../libcli/smb/read_smb.h"
32 #include "smbXcli_base.h"
33 #include "librpc/ndr/libndr.h"
35 struct smbXcli_conn;
36 struct smbXcli_req;
37 struct smbXcli_session;
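/*
 * The main structures in this file:
 *
 * struct smbXcli_conn      - one transport connection to a server, carrying
 *                            the negotiated protocol, the pending request
 *                            array, the SMB1 signing and encryption state
 *                            and the SMB2 credit state.
 * struct smbXcli_session   - an SMB2 session on such a connection
 *                            (session id, session and signing keys).
 * struct smbXcli_req_state - the per-request state behind each async
 *                            tevent_req, with separate smb1 and smb2 members.
 */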
39 struct smbXcli_conn {
40 int read_fd;
41 int write_fd;
42 struct sockaddr_storage local_ss;
43 struct sockaddr_storage remote_ss;
44 const char *remote_name;
46 struct tevent_queue *outgoing;
47 struct tevent_req **pending;
48 struct tevent_req *read_smb_req;
50 enum protocol_types protocol;
51 bool allow_signing;
52 bool desire_signing;
53 bool mandatory_signing;
56 * The incoming dispatch function should return:
57 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
58 * - NT_STATUS_OK, if no more processing is desired, e.g.
59 * the dispatch function called
60 * tevent_req_done().
61 * - All other return values disconnect the connection.
63 NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
64 TALLOC_CTX *tmp_mem,
65 uint8_t *inbuf);
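/*
 * smb1cli_conn_dispatch_incoming() and smb2cli_conn_dispatch_incoming()
 * (later in this file) implement this contract; the first
 * smb1cli_req_writev_submit() or smb2cli_req_compound_submit() call
 * installs the matching one on the connection.
 */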
67 struct {
68 struct {
69 uint32_t capabilities;
70 uint32_t max_xmit;
71 } client;
73 struct {
74 uint32_t capabilities;
75 uint32_t max_xmit;
76 uint16_t max_mux;
77 uint16_t security_mode;
78 bool readbraw;
79 bool writebraw;
80 bool lockread;
81 bool writeunlock;
82 uint32_t session_key;
83 struct GUID guid;
84 DATA_BLOB gss_blob;
85 uint8_t challenge[8];
86 const char *workgroup;
87 const char *name;
88 int time_zone;
89 NTTIME system_time;
90 } server;
92 uint32_t capabilities;
93 uint32_t max_xmit;
95 uint16_t mid;
97 struct smb_signing_state *signing;
98 struct smb_trans_enc_state *trans_enc;
100 struct tevent_req *read_braw_req;
101 } smb1;
103 struct {
104 struct {
105 uint16_t security_mode;
106 struct GUID guid;
107 } client;
109 struct {
110 uint32_t capabilities;
111 uint16_t security_mode;
112 struct GUID guid;
113 uint32_t max_trans_size;
114 uint32_t max_read_size;
115 uint32_t max_write_size;
116 NTTIME system_time;
117 NTTIME start_time;
118 DATA_BLOB gss_blob;
119 } server;
121 uint64_t mid;
122 uint16_t cur_credits;
123 uint16_t max_credits;
124 } smb2;
126 struct smbXcli_session *sessions;
129 struct smbXcli_session {
130 struct smbXcli_session *prev, *next;
131 struct smbXcli_conn *conn;
133 struct {
134 uint64_t session_id;
135 uint16_t session_flags;
136 DATA_BLOB signing_key;
137 DATA_BLOB session_key;
138 bool should_sign;
139 bool channel_setup;
140 } smb2;
143 struct smbXcli_req_state {
144 struct tevent_context *ev;
145 struct smbXcli_conn *conn;
146 struct smbXcli_session *session; /* maybe NULL */
148 uint8_t length_hdr[4];
150 bool one_way;
152 uint8_t *inbuf;
154 struct {
155 /* Space for the header including the wct */
156 uint8_t hdr[HDR_VWV];
159 * For normal requests, smb1cli_req_send chooses a mid.
160 * Secondary trans requests need to use the mid of the primary
161 * request, so we need a place to store it.
162 * Assume it is set if != 0.
164 uint16_t mid;
166 uint16_t *vwv;
167 uint8_t bytecount_buf[2];
169 #define MAX_SMB_IOV 10
170 /* length_hdr, hdr, words, byte_count, buffers */
171 struct iovec iov[1 + 3 + MAX_SMB_IOV];
172 int iov_count;
174 bool one_way_seqnum;
175 uint32_t seqnum;
176 struct tevent_req **chained_requests;
178 uint8_t recv_cmd;
179 NTSTATUS recv_status;
180 /* always an array of 3 talloc elements */
181 struct iovec *recv_iov;
182 } smb1;
184 struct {
185 const uint8_t *fixed;
186 uint16_t fixed_len;
187 const uint8_t *dyn;
188 uint32_t dyn_len;
190 uint8_t hdr[64];
191 uint8_t pad[7]; /* padding space for compounding */
193 /* always an array of 3 talloc elements */
194 struct iovec *recv_iov;
196 uint16_t credit_charge;
198 bool signing_skipped;
199 bool notify_async;
200 bool got_async;
201 } smb2;
204 static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
207 * NT_STATUS_OK means we do not notify the callers
209 smbXcli_conn_disconnect(conn, NT_STATUS_OK);
211 while (conn->sessions) {
212 conn->sessions->conn = NULL;
213 DLIST_REMOVE(conn->sessions, conn->sessions);
216 if (conn->smb1.trans_enc) {
217 common_free_encryption_state(&conn->smb1.trans_enc);
220 return 0;
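/*
 * Wrap an already connected socket in a smbXcli_conn.
 *
 * The fd is used for reading and dup()ed for writing, the local and remote
 * sockaddrs are recorded, and signing_state is mapped to the
 * allow/desire/mandatory triple used by the SMB1 signing engine and the
 * SMB2 client security mode. SMB1 starts out with max_xmit=1024 and mid=1,
 * SMB2 with a single available credit.
 */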
223 struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
224 int fd,
225 const char *remote_name,
226 enum smb_signing_setting signing_state,
227 uint32_t smb1_capabilities,
228 struct GUID *client_guid)
230 struct smbXcli_conn *conn = NULL;
231 void *ss = NULL;
232 struct sockaddr *sa = NULL;
233 socklen_t sa_length;
234 int ret;
236 conn = talloc_zero(mem_ctx, struct smbXcli_conn);
237 if (!conn) {
238 return NULL;
241 conn->read_fd = fd;
242 conn->write_fd = dup(fd);
243 if (conn->write_fd == -1) {
244 goto error;
247 conn->remote_name = talloc_strdup(conn, remote_name);
248 if (conn->remote_name == NULL) {
249 goto error;
253 ss = (void *)&conn->local_ss;
254 sa = (struct sockaddr *)ss;
255 sa_length = sizeof(conn->local_ss);
256 ret = getsockname(fd, sa, &sa_length);
257 if (ret == -1) {
258 goto error;
260 ss = (void *)&conn->remote_ss;
261 sa = (struct sockaddr *)ss;
262 sa_length = sizeof(conn->remote_ss);
263 ret = getpeername(fd, sa, &sa_length);
264 if (ret == -1) {
265 goto error;
268 conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
269 if (conn->outgoing == NULL) {
270 goto error;
272 conn->pending = NULL;
274 conn->protocol = PROTOCOL_NONE;
276 switch (signing_state) {
277 case SMB_SIGNING_OFF:
278 /* never */
279 conn->allow_signing = false;
280 conn->desire_signing = false;
281 conn->mandatory_signing = false;
282 break;
283 case SMB_SIGNING_DEFAULT:
284 case SMB_SIGNING_IF_REQUIRED:
285 /* if the server requires it */
286 conn->allow_signing = true;
287 conn->desire_signing = false;
288 conn->mandatory_signing = false;
289 break;
290 case SMB_SIGNING_REQUIRED:
291 /* always */
292 conn->allow_signing = true;
293 conn->desire_signing = true;
294 conn->mandatory_signing = true;
295 break;
298 conn->smb1.client.capabilities = smb1_capabilities;
299 conn->smb1.client.max_xmit = UINT16_MAX;
301 conn->smb1.capabilities = conn->smb1.client.capabilities;
302 conn->smb1.max_xmit = 1024;
304 conn->smb1.mid = 1;
306 /* initialise signing */
307 conn->smb1.signing = smb_signing_init(conn,
308 conn->allow_signing,
309 conn->desire_signing,
310 conn->mandatory_signing);
311 if (!conn->smb1.signing) {
312 goto error;
315 conn->smb2.client.security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
316 if (conn->mandatory_signing) {
317 conn->smb2.client.security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
319 if (client_guid) {
320 conn->smb2.client.guid = *client_guid;
323 conn->smb2.cur_credits = 1;
324 conn->smb2.max_credits = 0;
326 talloc_set_destructor(conn, smbXcli_conn_destructor);
327 return conn;
329 error:
330 if (conn->write_fd != -1) {
331 close(conn->write_fd);
333 TALLOC_FREE(conn);
334 return NULL;
337 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
339 if (conn == NULL) {
340 return false;
343 if (conn->read_fd == -1) {
344 return false;
347 return true;
350 enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
352 return conn->protocol;
355 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
357 if (conn->protocol >= PROTOCOL_SMB2_02) {
358 return true;
361 if (conn->smb1.capabilities & CAP_UNICODE) {
362 return true;
365 return false;
368 void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
370 set_socket_options(conn->read_fd, options);
373 const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
375 return &conn->local_ss;
378 const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
380 return &conn->remote_ss;
383 const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
385 return conn->remote_name;
388 uint16_t smbXcli_conn_max_requests(struct smbXcli_conn *conn)
390 if (conn->protocol >= PROTOCOL_SMB2_02) {
392 * TODO...
394 return 1;
397 return conn->smb1.server.max_mux;
400 NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
402 if (conn->protocol >= PROTOCOL_SMB2_02) {
403 return conn->smb2.server.system_time;
406 return conn->smb1.server.system_time;
409 const DATA_BLOB *smbXcli_conn_server_gss_blob(struct smbXcli_conn *conn)
411 if (conn->protocol >= PROTOCOL_SMB2_02) {
412 return &conn->smb2.server.gss_blob;
415 return &conn->smb1.server.gss_blob;
418 const struct GUID *smbXcli_conn_server_guid(struct smbXcli_conn *conn)
420 if (conn->protocol >= PROTOCOL_SMB2_02) {
421 return &conn->smb2.server.guid;
424 return &conn->smb1.server.guid;
427 uint32_t smb1cli_conn_capabilities(struct smbXcli_conn *conn)
429 return conn->smb1.capabilities;
432 uint32_t smb1cli_conn_max_xmit(struct smbXcli_conn *conn)
434 return conn->smb1.max_xmit;
437 uint32_t smb1cli_conn_server_session_key(struct smbXcli_conn *conn)
439 return conn->smb1.server.session_key;
442 const uint8_t *smb1cli_conn_server_challenge(struct smbXcli_conn *conn)
444 return conn->smb1.server.challenge;
447 uint16_t smb1cli_conn_server_security_mode(struct smbXcli_conn *conn)
449 return conn->smb1.server.security_mode;
452 bool smb1cli_conn_server_readbraw(struct smbXcli_conn *conn)
454 return conn->smb1.server.readbraw;
457 bool smb1cli_conn_server_writebraw(struct smbXcli_conn *conn)
459 return conn->smb1.server.writebraw;
462 bool smb1cli_conn_server_lockread(struct smbXcli_conn *conn)
464 return conn->smb1.server.lockread;
467 bool smb1cli_conn_server_writeunlock(struct smbXcli_conn *conn)
469 return conn->smb1.server.writeunlock;
472 int smb1cli_conn_server_time_zone(struct smbXcli_conn *conn)
474 return conn->smb1.server.time_zone;
477 bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
478 const DATA_BLOB user_session_key,
479 const DATA_BLOB response)
481 return smb_signing_activate(conn->smb1.signing,
482 user_session_key,
483 response);
486 bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
487 const uint8_t *buf, uint32_t seqnum)
489 return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
492 bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
494 return smb_signing_is_active(conn->smb1.signing);
497 void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
498 struct smb_trans_enc_state *es)
500 /* Replace the old state, if any. */
501 if (conn->smb1.trans_enc) {
502 common_free_encryption_state(&conn->smb1.trans_enc);
504 conn->smb1.trans_enc = es;
507 bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
509 return common_encryption_on(conn->smb1.trans_enc);
513 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *hdr)
515 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
516 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
518 if (NT_STATUS_IS_OK(status)) {
519 return NT_STATUS_OK;
522 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
523 return status;
526 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
530 * Is the SMB command able to hold an AND_X successor
531 * @param[in] cmd The SMB command in question
532 * @retval Can we add a chained request after "cmd"?
534 bool smb1cli_is_andx_req(uint8_t cmd)
536 switch (cmd) {
537 case SMBtconX:
538 case SMBlockingX:
539 case SMBopenX:
540 case SMBreadX:
541 case SMBwriteX:
542 case SMBsesssetupX:
543 case SMBulogoffX:
544 case SMBntcreateX:
545 return true;
546 break;
547 default:
548 break;
551 return false;
554 static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
556 size_t num_pending = talloc_array_length(conn->pending);
557 uint16_t result;
559 while (true) {
560 size_t i;
562 result = conn->smb1.mid++;
563 if ((result == 0) || (result == 0xffff)) {
564 continue;
567 for (i=0; i<num_pending; i++) {
568 if (result == smb1cli_req_mid(conn->pending[i])) {
569 break;
573 if (i == num_pending) {
574 return result;
579 void smbXcli_req_unset_pending(struct tevent_req *req)
581 struct smbXcli_req_state *state =
582 tevent_req_data(req,
583 struct smbXcli_req_state);
584 struct smbXcli_conn *conn = state->conn;
585 size_t num_pending = talloc_array_length(conn->pending);
586 size_t i;
588 if (state->smb1.mid != 0) {
590 * This is a [nt]trans[2] request which waits
591 * for more than one reply.
593 return;
596 talloc_set_destructor(req, NULL);
598 if (num_pending == 1) {
600 * The pending read_smb tevent_req is a child of
601 * conn->pending. So if nothing is pending anymore, we need to
602 * delete the socket read fde.
604 TALLOC_FREE(conn->pending);
605 conn->read_smb_req = NULL;
606 return;
609 for (i=0; i<num_pending; i++) {
610 if (req == conn->pending[i]) {
611 break;
614 if (i == num_pending) {
616 * Something's seriously broken. Just returning here is the
617 * right thing nevertheless, the point of this routine is to
618 * remove ourselves from conn->pending.
620 return;
624 * Remove ourselves from the conn->pending array
626 for (; i < (num_pending - 1); i++) {
627 conn->pending[i] = conn->pending[i+1];
631 * No NULL check here, we're shrinking by sizeof(void *), and
632 * talloc_realloc just adjusts the size for this.
634 conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
635 num_pending - 1);
636 return;
639 static int smbXcli_req_destructor(struct tevent_req *req)
641 struct smbXcli_req_state *state =
642 tevent_req_data(req,
643 struct smbXcli_req_state);
646 * Make sure we really remove it from
647 * the pending array on destruction.
649 state->smb1.mid = 0;
650 smbXcli_req_unset_pending(req);
651 return 0;
654 static bool smb1cli_req_cancel(struct tevent_req *req);
655 static bool smb2cli_req_cancel(struct tevent_req *req);
657 static bool smbXcli_req_cancel(struct tevent_req *req)
659 struct smbXcli_req_state *state =
660 tevent_req_data(req,
661 struct smbXcli_req_state);
663 if (!smbXcli_conn_is_connected(state->conn)) {
664 return false;
667 if (state->conn->protocol == PROTOCOL_NONE) {
668 return false;
671 if (state->conn->protocol >= PROTOCOL_SMB2_02) {
672 return smb2cli_req_cancel(req);
675 return smb1cli_req_cancel(req);
678 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);
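/*
 * Track an in-flight request on its connection.
 *
 * The request is appended to conn->pending, gets a destructor that removes
 * it again and a cancel function, and smbXcli_conn_receive_next() makes
 * sure a read_smb request is outstanding to receive the reply. If no read
 * request can be created, the connection is torn down with
 * NT_STATUS_NO_MEMORY.
 */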
680 bool smbXcli_req_set_pending(struct tevent_req *req)
682 struct smbXcli_req_state *state =
683 tevent_req_data(req,
684 struct smbXcli_req_state);
685 struct smbXcli_conn *conn;
686 struct tevent_req **pending;
687 size_t num_pending;
689 conn = state->conn;
691 if (!smbXcli_conn_is_connected(conn)) {
692 return false;
695 num_pending = talloc_array_length(conn->pending);
697 pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
698 num_pending+1);
699 if (pending == NULL) {
700 return false;
702 pending[num_pending] = req;
703 conn->pending = pending;
704 talloc_set_destructor(req, smbXcli_req_destructor);
705 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
707 if (!smbXcli_conn_receive_next(conn)) {
709 * the caller should notify the current request
711 * And all other pending requests get notified
712 * by smbXcli_conn_disconnect().
714 smbXcli_req_unset_pending(req);
715 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
716 return false;
719 return true;
722 static void smbXcli_conn_received(struct tevent_req *subreq);
724 static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
726 size_t num_pending = talloc_array_length(conn->pending);
727 struct tevent_req *req;
728 struct smbXcli_req_state *state;
730 if (conn->read_smb_req != NULL) {
731 return true;
734 if (num_pending == 0) {
735 if (conn->smb2.mid < UINT64_MAX) {
736 /* no more pending requests, so we are done for now */
737 return true;
741 * If there are no more SMB2 requests possible,
742 * because we are out of message ids,
743 * we need to disconnect.
745 smbXcli_conn_disconnect(conn, NT_STATUS_CONNECTION_ABORTED);
746 return true;
749 req = conn->pending[0];
750 state = tevent_req_data(req, struct smbXcli_req_state);
753 * We're the first ones, add the read_smb request that waits for the
754 * answer from the server
756 conn->read_smb_req = read_smb_send(conn->pending,
757 state->ev,
758 conn->read_fd);
759 if (conn->read_smb_req == NULL) {
760 return false;
762 tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
763 return true;
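/*
 * Tear down a connection.
 *
 * Stops the outgoing queue, closes both fds and fails every pending request
 * (including all requests of an SMB1 AndX chain) with the given status.
 * Callbacks are deferred because more than one caller may be notified.
 * NT_STATUS_OK means "do not notify the callers" and is used by the
 * destructor.
 */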
766 void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
768 tevent_queue_stop(conn->outgoing);
770 if (conn->read_fd != -1) {
771 close(conn->read_fd);
773 if (conn->write_fd != -1) {
774 close(conn->write_fd);
776 conn->read_fd = -1;
777 conn->write_fd = -1;
780 * Cancel all pending requests. We do not do a for-loop walking
781 * conn->pending because that array changes in
782 * smbXcli_req_unset_pending.
784 while (talloc_array_length(conn->pending) > 0) {
785 struct tevent_req *req;
786 struct smbXcli_req_state *state;
787 struct tevent_req **chain;
788 size_t num_chained;
789 size_t i;
791 req = conn->pending[0];
792 state = tevent_req_data(req, struct smbXcli_req_state);
794 if (state->smb1.chained_requests == NULL) {
796 * We're dead. No point waiting for trans2
797 * replies.
799 state->smb1.mid = 0;
801 smbXcli_req_unset_pending(req);
803 if (NT_STATUS_IS_OK(status)) {
804 /* do not notify the callers */
805 continue;
809 * we need to defer the callback, because we may notify
810 * more than one caller.
812 tevent_req_defer_callback(req, state->ev);
813 tevent_req_nterror(req, status);
814 continue;
817 chain = talloc_move(conn, &state->smb1.chained_requests);
818 num_chained = talloc_array_length(chain);
820 for (i=0; i<num_chained; i++) {
821 req = chain[i];
822 state = tevent_req_data(req, struct smbXcli_req_state);
825 * We're dead. No point waiting for trans2
826 * replies.
828 state->smb1.mid = 0;
830 smbXcli_req_unset_pending(req);
832 if (NT_STATUS_IS_OK(status)) {
833 /* do not notify the callers */
834 continue;
838 * we need to defer the callback, because we may notify
839 * more than one caller.
841 tevent_req_defer_callback(req, state->ev);
842 tevent_req_nterror(req, status);
844 TALLOC_FREE(chain);
849 * Fetch a smb request's mid. Only valid after the request has been sent by
850 * smb1cli_req_send().
852 uint16_t smb1cli_req_mid(struct tevent_req *req)
854 struct smbXcli_req_state *state =
855 tevent_req_data(req,
856 struct smbXcli_req_state);
858 if (state->smb1.mid != 0) {
859 return state->smb1.mid;
862 return SVAL(state->smb1.hdr, HDR_MID);
865 void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
867 struct smbXcli_req_state *state =
868 tevent_req_data(req,
869 struct smbXcli_req_state);
871 state->smb1.mid = mid;
874 uint32_t smb1cli_req_seqnum(struct tevent_req *req)
876 struct smbXcli_req_state *state =
877 tevent_req_data(req,
878 struct smbXcli_req_state);
880 return state->smb1.seqnum;
883 void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
885 struct smbXcli_req_state *state =
886 tevent_req_data(req,
887 struct smbXcli_req_state);
889 state->smb1.seqnum = seqnum;
892 static size_t smbXcli_iov_len(const struct iovec *iov, int count)
894 size_t result = 0;
895 int i;
896 for (i=0; i<count; i++) {
897 result += iov[i].iov_len;
899 return result;
902 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
903 const struct iovec *iov,
904 int count)
906 size_t len = smbXcli_iov_len(iov, count);
907 size_t copied;
908 uint8_t *buf;
909 int i;
911 buf = talloc_array(mem_ctx, uint8_t, len);
912 if (buf == NULL) {
913 return NULL;
915 copied = 0;
916 for (i=0; i<count; i++) {
917 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
918 copied += iov[i].iov_len;
920 return buf;
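/*
 * Compute the default SMB1 FLAGS and FLAGS2 values for the negotiated
 * protocol level and capabilities, then apply the caller's additional
 * and clear masks on top.
 */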
923 static void smb1cli_req_flags(enum protocol_types protocol,
924 uint32_t smb1_capabilities,
925 uint8_t smb_command,
926 uint8_t additional_flags,
927 uint8_t clear_flags,
928 uint8_t *_flags,
929 uint16_t additional_flags2,
930 uint16_t clear_flags2,
931 uint16_t *_flags2)
933 uint8_t flags = 0;
934 uint16_t flags2 = 0;
936 if (protocol >= PROTOCOL_LANMAN1) {
937 flags |= FLAG_CASELESS_PATHNAMES;
938 flags |= FLAG_CANONICAL_PATHNAMES;
941 if (protocol >= PROTOCOL_LANMAN2) {
942 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
943 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
946 if (protocol >= PROTOCOL_NT1) {
947 flags2 |= FLAGS2_IS_LONG_NAME;
949 if (smb1_capabilities & CAP_UNICODE) {
950 flags2 |= FLAGS2_UNICODE_STRINGS;
952 if (smb1_capabilities & CAP_STATUS32) {
953 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
955 if (smb1_capabilities & CAP_EXTENDED_SECURITY) {
956 flags2 |= FLAGS2_EXTENDED_SECURITY;
960 flags |= additional_flags;
961 flags &= ~clear_flags;
962 flags2 |= additional_flags2;
963 flags2 &= ~clear_flags2;
965 *_flags = flags;
966 *_flags2 = flags2;
969 static void smb1cli_req_cancel_done(struct tevent_req *subreq);
971 static bool smb1cli_req_cancel(struct tevent_req *req)
973 struct smbXcli_req_state *state =
974 tevent_req_data(req,
975 struct smbXcli_req_state);
976 uint8_t flags;
977 uint16_t flags2;
978 uint32_t pid;
979 uint16_t tid;
980 uint16_t uid;
981 uint16_t mid;
982 struct tevent_req *subreq;
983 NTSTATUS status;
985 flags = CVAL(state->smb1.hdr, HDR_FLG);
986 flags2 = SVAL(state->smb1.hdr, HDR_FLG2);
987 pid = SVAL(state->smb1.hdr, HDR_PID);
988 pid |= SVAL(state->smb1.hdr, HDR_PIDHIGH)<<16;
989 tid = SVAL(state->smb1.hdr, HDR_TID);
990 uid = SVAL(state->smb1.hdr, HDR_UID);
991 mid = SVAL(state->smb1.hdr, HDR_MID);
993 subreq = smb1cli_req_create(state, state->ev,
994 state->conn,
995 SMBntcancel,
996 flags, 0,
997 flags2, 0,
998 0, /* timeout */
999 pid, tid, uid,
1000 0, NULL, /* vwv */
1001 0, NULL); /* bytes */
1002 if (subreq == NULL) {
1003 return false;
1005 smb1cli_req_set_mid(subreq, mid);
1007 status = smb1cli_req_chain_submit(&subreq, 1);
1008 if (!NT_STATUS_IS_OK(status)) {
1009 TALLOC_FREE(subreq);
1010 return false;
1012 smb1cli_req_set_mid(subreq, 0);
1014 tevent_req_set_callback(subreq, smb1cli_req_cancel_done, NULL);
1016 return true;
1019 static void smb1cli_req_cancel_done(struct tevent_req *subreq)
1021 /* we do not care about the result */
1022 TALLOC_FREE(subreq);
1025 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
1026 struct tevent_context *ev,
1027 struct smbXcli_conn *conn,
1028 uint8_t smb_command,
1029 uint8_t additional_flags,
1030 uint8_t clear_flags,
1031 uint16_t additional_flags2,
1032 uint16_t clear_flags2,
1033 uint32_t timeout_msec,
1034 uint32_t pid,
1035 uint16_t tid,
1036 uint16_t uid,
1037 uint8_t wct, uint16_t *vwv,
1038 int iov_count,
1039 struct iovec *bytes_iov)
1041 struct tevent_req *req;
1042 struct smbXcli_req_state *state;
1043 uint8_t flags = 0;
1044 uint16_t flags2 = 0;
1046 if (iov_count > MAX_SMB_IOV) {
1048 * Should not happen :-)
1050 return NULL;
1053 req = tevent_req_create(mem_ctx, &state,
1054 struct smbXcli_req_state);
1055 if (req == NULL) {
1056 return NULL;
1058 state->ev = ev;
1059 state->conn = conn;
1061 state->smb1.recv_cmd = 0xFF;
1062 state->smb1.recv_status = NT_STATUS_INTERNAL_ERROR;
1063 state->smb1.recv_iov = talloc_zero_array(state, struct iovec, 3);
1064 if (state->smb1.recv_iov == NULL) {
1065 TALLOC_FREE(req);
1066 return NULL;
1069 smb1cli_req_flags(conn->protocol,
1070 conn->smb1.capabilities,
1071 smb_command,
1072 additional_flags,
1073 clear_flags,
1074 &flags,
1075 additional_flags2,
1076 clear_flags2,
1077 &flags2);
1079 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
1080 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
1081 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
1082 SCVAL(state->smb1.hdr, HDR_FLG, flags);
1083 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
1084 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
1085 SSVAL(state->smb1.hdr, HDR_TID, tid);
1086 SSVAL(state->smb1.hdr, HDR_PID, pid);
1087 SSVAL(state->smb1.hdr, HDR_UID, uid);
1088 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
1089 SCVAL(state->smb1.hdr, HDR_WCT, wct);
1091 state->smb1.vwv = vwv;
1093 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
1095 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
1096 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
1097 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
1098 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
1099 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
1100 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
1101 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
1102 state->smb1.iov[3].iov_len = sizeof(uint16_t);
1104 if (iov_count != 0) {
1105 memcpy(&state->smb1.iov[4], bytes_iov,
1106 iov_count * sizeof(*bytes_iov));
1108 state->smb1.iov_count = iov_count + 4;
1110 if (timeout_msec > 0) {
1111 struct timeval endtime;
1113 endtime = timeval_current_ofs_msec(timeout_msec);
1114 if (!tevent_req_set_endtime(req, ev, endtime)) {
1115 return req;
1119 switch (smb_command) {
1120 case SMBtranss:
1121 case SMBtranss2:
1122 case SMBnttranss:
1123 state->one_way = true;
1124 break;
1125 case SMBntcancel:
1126 state->one_way = true;
1127 state->smb1.one_way_seqnum = true;
1128 break;
1129 case SMBlockingX:
1130 if ((wct == 8) &&
1131 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
1132 state->one_way = true;
1134 break;
1137 return req;
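/*
 * Sign an outgoing SMB1 PDU given as an iovec.
 *
 * Expects the canonical layout produced by smb1cli_req_create() (NBT length,
 * SMB header, vwv, byte count, ...). The vector is flattened into one
 * buffer, the next sequence number is drawn from the signing state, and the
 * signed header is copied back into iov[1].
 */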
1140 static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
1141 struct iovec *iov, int iov_count,
1142 uint32_t *seqnum,
1143 bool one_way_seqnum)
1145 TALLOC_CTX *frame = NULL;
1146 uint8_t *buf;
1149 * Obvious optimization: Make cli_calculate_sign_mac work with struct
1150 * iovec directly. MD5Update would do that just fine.
1153 if (iov_count < 4) {
1154 return NT_STATUS_INVALID_PARAMETER_MIX;
1156 if (iov[0].iov_len != NBT_HDR_SIZE) {
1157 return NT_STATUS_INVALID_PARAMETER_MIX;
1159 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1160 return NT_STATUS_INVALID_PARAMETER_MIX;
1162 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1163 return NT_STATUS_INVALID_PARAMETER_MIX;
1165 if (iov[3].iov_len != sizeof(uint16_t)) {
1166 return NT_STATUS_INVALID_PARAMETER_MIX;
1169 frame = talloc_stackframe();
1171 buf = smbXcli_iov_concat(frame, iov, iov_count);
1172 if (buf == NULL) {
1173 return NT_STATUS_NO_MEMORY;
1176 *seqnum = smb_signing_next_seqnum(conn->smb1.signing,
1177 one_way_seqnum);
1178 smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
1179 memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);
1181 TALLOC_FREE(frame);
1182 return NT_STATUS_OK;
1185 static void smb1cli_req_writev_done(struct tevent_req *subreq);
1186 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1187 TALLOC_CTX *tmp_mem,
1188 uint8_t *inbuf);
1190 static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
1191 struct smbXcli_req_state *state,
1192 struct iovec *iov, int iov_count)
1194 struct tevent_req *subreq;
1195 NTSTATUS status;
1196 uint8_t cmd;
1197 uint16_t mid;
1199 if (!smbXcli_conn_is_connected(state->conn)) {
1200 return NT_STATUS_CONNECTION_DISCONNECTED;
1203 if (state->conn->protocol > PROTOCOL_NT1) {
1204 return NT_STATUS_REVISION_MISMATCH;
1207 if (iov_count < 4) {
1208 return NT_STATUS_INVALID_PARAMETER_MIX;
1210 if (iov[0].iov_len != NBT_HDR_SIZE) {
1211 return NT_STATUS_INVALID_PARAMETER_MIX;
1213 if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
1214 return NT_STATUS_INVALID_PARAMETER_MIX;
1216 if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
1217 return NT_STATUS_INVALID_PARAMETER_MIX;
1219 if (iov[3].iov_len != sizeof(uint16_t)) {
1220 return NT_STATUS_INVALID_PARAMETER_MIX;
1223 cmd = CVAL(iov[1].iov_base, HDR_COM);
1224 if (cmd == SMBreadBraw) {
1225 if (smbXcli_conn_has_async_calls(state->conn)) {
1226 return NT_STATUS_INVALID_PARAMETER_MIX;
1228 state->conn->smb1.read_braw_req = req;
1231 if (state->smb1.mid != 0) {
1232 mid = state->smb1.mid;
1233 } else {
1234 mid = smb1cli_alloc_mid(state->conn);
1236 SSVAL(iov[1].iov_base, HDR_MID, mid);
1238 _smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));
1240 status = smb1cli_conn_signv(state->conn, iov, iov_count,
1241 &state->smb1.seqnum,
1242 state->smb1.one_way_seqnum);
1244 if (!NT_STATUS_IS_OK(status)) {
1245 return status;
1249 * If we supported multiple encryption contexts
1250 * here we'd look up based on tid.
1252 if (common_encryption_on(state->conn->smb1.trans_enc)) {
1253 char *buf, *enc_buf;
1255 buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
1256 if (buf == NULL) {
1257 return NT_STATUS_NO_MEMORY;
1259 status = common_encrypt_buffer(state->conn->smb1.trans_enc,
1260 (char *)buf, &enc_buf);
1261 TALLOC_FREE(buf);
1262 if (!NT_STATUS_IS_OK(status)) {
1263 DEBUG(0, ("Error in encrypting client message: %s\n",
1264 nt_errstr(status)));
1265 return status;
1267 buf = (char *)talloc_memdup(state, enc_buf,
1268 smb_len_nbt(enc_buf)+4);
1269 SAFE_FREE(enc_buf);
1270 if (buf == NULL) {
1271 return NT_STATUS_NO_MEMORY;
1273 iov[0].iov_base = (void *)buf;
1274 iov[0].iov_len = talloc_get_size(buf);
1275 iov_count = 1;
1278 if (state->conn->dispatch_incoming == NULL) {
1279 state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
1282 tevent_req_set_cancel_fn(req, smbXcli_req_cancel);
1284 subreq = writev_send(state, state->ev, state->conn->outgoing,
1285 state->conn->write_fd, false, iov, iov_count);
1286 if (subreq == NULL) {
1287 return NT_STATUS_NO_MEMORY;
1289 tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
1290 return NT_STATUS_OK;
1293 struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
1294 struct tevent_context *ev,
1295 struct smbXcli_conn *conn,
1296 uint8_t smb_command,
1297 uint8_t additional_flags,
1298 uint8_t clear_flags,
1299 uint16_t additional_flags2,
1300 uint16_t clear_flags2,
1301 uint32_t timeout_msec,
1302 uint32_t pid,
1303 uint16_t tid,
1304 uint16_t uid,
1305 uint8_t wct, uint16_t *vwv,
1306 uint32_t num_bytes,
1307 const uint8_t *bytes)
1309 struct tevent_req *req;
1310 struct iovec iov;
1311 NTSTATUS status;
1313 iov.iov_base = discard_const_p(void, bytes);
1314 iov.iov_len = num_bytes;
1316 req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
1317 additional_flags, clear_flags,
1318 additional_flags2, clear_flags2,
1319 timeout_msec,
1320 pid, tid, uid,
1321 wct, vwv, 1, &iov);
1322 if (req == NULL) {
1323 return NULL;
1325 if (!tevent_req_is_in_progress(req)) {
1326 return tevent_req_post(req, ev);
1328 status = smb1cli_req_chain_submit(&req, 1);
1329 if (tevent_req_nterror(req, status)) {
1330 return tevent_req_post(req, ev);
1332 return req;
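/*
 * Illustrative use of smb1cli_req_send()/smb1cli_req_recv(); this sketch is
 * not taken from a real caller and the local names (mem_ctx, ev, conn,
 * echo_vwv, num_bytes, bytes) are assumptions:
 *
 *	struct tevent_req *req;
 *	uint8_t wct = 0;
 *	uint16_t *vwv = NULL;
 *	NTSTATUS status;
 *
 *	req = smb1cli_req_send(mem_ctx, ev, conn, SMBecho,
 *			       0, 0,      (additional/clear flags)
 *			       0, 0,      (additional/clear flags2)
 *			       10000,     (timeout in msec)
 *			       0, 0, 0,   (pid, tid, uid)
 *			       1, echo_vwv, num_bytes, bytes);
 *	if (req == NULL || !tevent_req_poll_ntstatus(req, ev, &status)) {
 *		... handle the error ...
 *	}
 *	status = smb1cli_req_recv(req, mem_ctx,
 *				  NULL, NULL,        (piov, phdr)
 *				  &wct, &vwv,
 *				  NULL, NULL,        (pvwv_offset, pnum_bytes)
 *				  NULL, NULL, NULL,  (pbytes, pbytes_offset, pinbuf)
 *				  NULL, 0);          (no expected list)
 */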
1335 static void smb1cli_req_writev_done(struct tevent_req *subreq)
1337 struct tevent_req *req =
1338 tevent_req_callback_data(subreq,
1339 struct tevent_req);
1340 struct smbXcli_req_state *state =
1341 tevent_req_data(req,
1342 struct smbXcli_req_state);
1343 ssize_t nwritten;
1344 int err;
1346 nwritten = writev_recv(subreq, &err);
1347 TALLOC_FREE(subreq);
1348 if (nwritten == -1) {
1349 NTSTATUS status = map_nt_error_from_unix_common(err);
1350 smbXcli_conn_disconnect(state->conn, status);
1351 return;
1354 if (state->one_way) {
1355 state->inbuf = NULL;
1356 tevent_req_done(req);
1357 return;
1360 if (!smbXcli_req_set_pending(req)) {
1361 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1362 return;
1366 static void smbXcli_conn_received(struct tevent_req *subreq)
1368 struct smbXcli_conn *conn =
1369 tevent_req_callback_data(subreq,
1370 struct smbXcli_conn);
1371 TALLOC_CTX *frame = talloc_stackframe();
1372 NTSTATUS status;
1373 uint8_t *inbuf;
1374 ssize_t received;
1375 int err;
1377 if (subreq != conn->read_smb_req) {
1378 DEBUG(1, ("Internal error: smbXcli_conn_received called with "
1379 "unexpected subreq\n"));
1380 status = NT_STATUS_INTERNAL_ERROR;
1381 smbXcli_conn_disconnect(conn, status);
1382 TALLOC_FREE(frame);
1383 return;
1385 conn->read_smb_req = NULL;
1387 received = read_smb_recv(subreq, frame, &inbuf, &err);
1388 TALLOC_FREE(subreq);
1389 if (received == -1) {
1390 status = map_nt_error_from_unix_common(err);
1391 smbXcli_conn_disconnect(conn, status);
1392 TALLOC_FREE(frame);
1393 return;
1396 status = conn->dispatch_incoming(conn, frame, inbuf);
1397 TALLOC_FREE(frame);
1398 if (NT_STATUS_IS_OK(status)) {
1400 * We should not do any more processing
1401 * as the dispatch function called
1402 * tevent_req_done().
1404 return;
1405 } else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
1407 * We got an error, so notify all pending requests
1409 smbXcli_conn_disconnect(conn, status);
1410 return;
1414 * We got NT_STATUS_RETRY, so we may ask for a
1415 * next incoming pdu.
1417 if (!smbXcli_conn_receive_next(conn)) {
1418 smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
1422 static NTSTATUS smb1cli_inbuf_parse_chain(uint8_t *buf, TALLOC_CTX *mem_ctx,
1423 struct iovec **piov, int *pnum_iov)
1425 struct iovec *iov;
1426 int num_iov;
1427 size_t buflen;
1428 size_t taken;
1429 size_t remaining;
1430 uint8_t *hdr;
1431 uint8_t cmd;
1432 uint32_t wct_ofs;
1434 buflen = smb_len_nbt(buf);
1435 taken = 0;
1437 hdr = buf + NBT_HDR_SIZE;
1439 if (buflen < MIN_SMB_SIZE) {
1440 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1444 * This returns iovec elements in the following order:
1446 * - SMB header
1448 * - Parameter Block
1449 * - Data Block
1451 * - Parameter Block
1452 * - Data Block
1454 * - Parameter Block
1455 * - Data Block
1457 num_iov = 1;
1459 iov = talloc_array(mem_ctx, struct iovec, num_iov);
1460 if (iov == NULL) {
1461 return NT_STATUS_NO_MEMORY;
1463 iov[0].iov_base = hdr;
1464 iov[0].iov_len = HDR_WCT;
1465 taken += HDR_WCT;
1467 cmd = CVAL(hdr, HDR_COM);
1468 wct_ofs = HDR_WCT;
1470 while (true) {
1471 size_t len = buflen - taken;
1472 struct iovec *cur;
1473 struct iovec *iov_tmp;
1474 uint8_t wct;
1475 uint32_t bcc_ofs;
1476 uint16_t bcc;
1477 size_t needed;
1480 * we need at least WCT and BCC
1482 needed = sizeof(uint8_t) + sizeof(uint16_t);
1483 if (len < needed) {
1484 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1485 __location__, (int)len, (int)needed));
1486 goto inval;
1490 * Now we check if the specified words are there
1492 wct = CVAL(hdr, wct_ofs);
1493 needed += wct * sizeof(uint16_t);
1494 if (len < needed) {
1495 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1496 __location__, (int)len, (int)needed));
1497 goto inval;
1501 * Now we check if the specified bytes are there
1503 bcc_ofs = wct_ofs + sizeof(uint8_t) + wct * sizeof(uint16_t);
1504 bcc = SVAL(hdr, bcc_ofs);
1505 needed += bcc * sizeof(uint8_t);
1506 if (len < needed) {
1507 DEBUG(10, ("%s: %d bytes left, expected at least %d\n",
1508 __location__, (int)len, (int)needed));
1509 goto inval;
1513 * we allocate 2 iovec structures for words and bytes
1515 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
1516 num_iov + 2);
1517 if (iov_tmp == NULL) {
1518 TALLOC_FREE(iov);
1519 return NT_STATUS_NO_MEMORY;
1521 iov = iov_tmp;
1522 cur = &iov[num_iov];
1523 num_iov += 2;
1525 cur[0].iov_len = wct * sizeof(uint16_t);
1526 cur[0].iov_base = hdr + (wct_ofs + sizeof(uint8_t));
1527 cur[1].iov_len = bcc * sizeof(uint8_t);
1528 cur[1].iov_base = hdr + (bcc_ofs + sizeof(uint16_t));
1530 taken += needed;
1532 if (!smb1cli_is_andx_req(cmd)) {
1534 * If the current command does not have AndX chaining
1535 * we are done.
1537 break;
1540 if (wct == 0 && bcc == 0) {
1542 * An empty response also ends the chain,
1543 * most likely with an error.
1545 break;
1548 if (wct < 2) {
1549 DEBUG(10, ("%s: wct[%d] < 2 for cmd[0x%02X]\n",
1550 __location__, (int)wct, (int)cmd));
1551 goto inval;
1553 cmd = CVAL(cur[0].iov_base, 0);
1554 if (cmd == 0xFF) {
1556 * If it is the end of the chain we are also done.
1558 break;
1560 wct_ofs = SVAL(cur[0].iov_base, 2);
1562 if (wct_ofs < taken) {
1563 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1565 if (wct_ofs > buflen) {
1566 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1570 * we consumed everything up to the start of the next
1571 * parameter block.
1573 taken = wct_ofs;
1576 remaining = buflen - taken;
1578 if (remaining > 0 && num_iov >= 3) {
1580 * The last DATA block gets the remaining
1581 * bytes, this is needed to support
1582 * CAP_LARGE_WRITEX and CAP_LARGE_READX.
1584 iov[num_iov-1].iov_len += remaining;
1587 *piov = iov;
1588 *pnum_iov = num_iov;
1589 return NT_STATUS_OK;
1591 inval:
1592 TALLOC_FREE(iov);
1593 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1596 static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
1597 TALLOC_CTX *tmp_mem,
1598 uint8_t *inbuf)
1600 struct tevent_req *req;
1601 struct smbXcli_req_state *state;
1602 NTSTATUS status;
1603 size_t num_pending;
1604 size_t i;
1605 uint8_t cmd;
1606 uint16_t mid;
1607 bool oplock_break;
1608 const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;
1609 struct iovec *iov = NULL;
1610 int num_iov = 0;
1611 struct tevent_req **chain = NULL;
1612 size_t num_chained = 0;
1613 size_t num_responses = 0;
1615 if (conn->smb1.read_braw_req != NULL) {
1616 req = conn->smb1.read_braw_req;
1617 conn->smb1.read_braw_req = NULL;
1618 state = tevent_req_data(req, struct smbXcli_req_state);
1620 smbXcli_req_unset_pending(req);
1622 if (state->smb1.recv_iov == NULL) {
1624 * For requests with more than
1625 * one response, we have to re-add the
1626 * recv_iov array.
1628 state->smb1.recv_iov = talloc_zero_array(state,
1629 struct iovec,
1631 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1632 return NT_STATUS_OK;
1636 state->smb1.recv_iov[0].iov_base = (void *)(inbuf + NBT_HDR_SIZE);
1637 state->smb1.recv_iov[0].iov_len = smb_len_nbt(inbuf);
1638 ZERO_STRUCT(state->smb1.recv_iov[1]);
1639 ZERO_STRUCT(state->smb1.recv_iov[2]);
1641 state->smb1.recv_cmd = SMBreadBraw;
1642 state->smb1.recv_status = NT_STATUS_OK;
1643 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1645 tevent_req_done(req);
1646 return NT_STATUS_OK;
1649 if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
1650 && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
1651 DEBUG(10, ("Got non-SMB PDU\n"));
1652 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1656 * If we supported multiple encryption contexts
1657 * here we'd look up based on tid.
1659 if (common_encryption_on(conn->smb1.trans_enc)
1660 && (CVAL(inbuf, 0) == 0)) {
1661 uint16_t enc_ctx_num;
1663 status = get_enc_ctx_num(inbuf, &enc_ctx_num);
1664 if (!NT_STATUS_IS_OK(status)) {
1665 DEBUG(10, ("get_enc_ctx_num returned %s\n",
1666 nt_errstr(status)));
1667 return status;
1670 if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
1671 DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
1672 enc_ctx_num,
1673 conn->smb1.trans_enc->enc_ctx_num));
1674 return NT_STATUS_INVALID_HANDLE;
1677 status = common_decrypt_buffer(conn->smb1.trans_enc,
1678 (char *)inbuf);
1679 if (!NT_STATUS_IS_OK(status)) {
1680 DEBUG(10, ("common_decrypt_buffer returned %s\n",
1681 nt_errstr(status)));
1682 return status;
1686 mid = SVAL(inhdr, HDR_MID);
1687 num_pending = talloc_array_length(conn->pending);
1689 for (i=0; i<num_pending; i++) {
1690 if (mid == smb1cli_req_mid(conn->pending[i])) {
1691 break;
1694 if (i == num_pending) {
1695 /* Dump unexpected reply */
1696 return NT_STATUS_RETRY;
1699 oplock_break = false;
1701 if (mid == 0xffff) {
1703 * Paranoia checks that this is really an oplock break request.
1705 oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
1706 oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
1707 oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
1708 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
1709 oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);
1711 if (!oplock_break) {
1712 /* Dump unexpected reply */
1713 return NT_STATUS_RETRY;
1717 req = conn->pending[i];
1718 state = tevent_req_data(req, struct smbXcli_req_state);
1720 if (!oplock_break /* oplock breaks are not signed */
1721 && !smb_signing_check_pdu(conn->smb1.signing,
1722 inbuf, state->smb1.seqnum+1)) {
1723 DEBUG(10, ("cli_check_sign_mac failed\n"));
1724 return NT_STATUS_ACCESS_DENIED;
1727 status = smb1cli_inbuf_parse_chain(inbuf, tmp_mem,
1728 &iov, &num_iov);
1729 if (!NT_STATUS_IS_OK(status)) {
1730 DEBUG(10,("smb1cli_inbuf_parse_chain - %s\n",
1731 nt_errstr(status)));
1732 return status;
1735 cmd = CVAL(inhdr, HDR_COM);
1736 status = smb1cli_pull_raw_error(inhdr);
1738 if (state->smb1.chained_requests == NULL) {
1739 if (num_iov != 3) {
1740 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1743 smbXcli_req_unset_pending(req);
1745 if (state->smb1.recv_iov == NULL) {
1747 * For requests with more than
1748 * one response, we have to re-add the
1749 * recv_iov array.
1751 state->smb1.recv_iov = talloc_zero_array(state,
1752 struct iovec,
1754 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1755 return NT_STATUS_OK;
1759 state->smb1.recv_cmd = cmd;
1760 state->smb1.recv_status = status;
1761 state->inbuf = talloc_move(state->smb1.recv_iov, &inbuf);
1763 state->smb1.recv_iov[0] = iov[0];
1764 state->smb1.recv_iov[1] = iov[1];
1765 state->smb1.recv_iov[2] = iov[2];
1767 if (talloc_array_length(conn->pending) == 0) {
1768 tevent_req_done(req);
1769 return NT_STATUS_OK;
1772 tevent_req_defer_callback(req, state->ev);
1773 tevent_req_done(req);
1774 return NT_STATUS_RETRY;
1777 chain = talloc_move(tmp_mem, &state->smb1.chained_requests);
1778 num_chained = talloc_array_length(chain);
1779 num_responses = (num_iov - 1)/2;
1781 if (num_responses > num_chained) {
1782 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1785 for (i=0; i<num_chained; i++) {
1786 size_t iov_idx = 1 + (i*2);
1787 struct iovec *cur = &iov[iov_idx];
1788 uint8_t *inbuf_ref;
1790 req = chain[i];
1791 state = tevent_req_data(req, struct smbXcli_req_state);
1793 smbXcli_req_unset_pending(req);
1796 * as we finish multiple requests here
1797 * we need to defer the callbacks as
1798 * they could destroy our current stack state.
1800 tevent_req_defer_callback(req, state->ev);
1802 if (i >= num_responses) {
1803 tevent_req_nterror(req, NT_STATUS_REQUEST_ABORTED);
1804 continue;
1807 if (state->smb1.recv_iov == NULL) {
1809 * For requests with more than
1810 * one response, we have to re-add the
1811 * recv_iov array.
1813 state->smb1.recv_iov = talloc_zero_array(state,
1814 struct iovec,
1816 if (tevent_req_nomem(state->smb1.recv_iov, req)) {
1817 continue;
1821 state->smb1.recv_cmd = cmd;
1823 if (i == (num_responses - 1)) {
1825 * The last request in the chain gets the status
1827 state->smb1.recv_status = status;
1828 } else {
1829 cmd = CVAL(cur[0].iov_base, 0);
1830 state->smb1.recv_status = NT_STATUS_OK;
1833 state->inbuf = inbuf;
1836 * Note: here we use talloc_reference() in a way
1837 * that does not expose it to the caller.
1839 inbuf_ref = talloc_reference(state->smb1.recv_iov, inbuf);
1840 if (tevent_req_nomem(inbuf_ref, req)) {
1841 continue;
1844 /* copy the related buffers */
1845 state->smb1.recv_iov[0] = iov[0];
1846 state->smb1.recv_iov[1] = cur[0];
1847 state->smb1.recv_iov[2] = cur[1];
1849 tevent_req_done(req);
1852 return NT_STATUS_RETRY;
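/*
 * Collect the result of a finished SMB1 request.
 *
 * Returns pointers into the received PDU (header, vwv, bytes) together with
 * their offsets. The received status and word count are checked against the
 * "expected" list: an unexpected status is returned as-is, an expected
 * status without a matching word count yields
 * NT_STATUS_INVALID_NETWORK_RESPONSE, and an entry with wct == 0 accepts any
 * word count. With num_expected == 0 everything is accepted. A
 * transport-level error that happens to equal an expected status is turned
 * into NT_STATUS_UNEXPECTED_NETWORK_ERROR.
 */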
1855 NTSTATUS smb1cli_req_recv(struct tevent_req *req,
1856 TALLOC_CTX *mem_ctx,
1857 struct iovec **piov,
1858 uint8_t **phdr,
1859 uint8_t *pwct,
1860 uint16_t **pvwv,
1861 uint32_t *pvwv_offset,
1862 uint32_t *pnum_bytes,
1863 uint8_t **pbytes,
1864 uint32_t *pbytes_offset,
1865 uint8_t **pinbuf,
1866 const struct smb1cli_req_expected_response *expected,
1867 size_t num_expected)
1869 struct smbXcli_req_state *state =
1870 tevent_req_data(req,
1871 struct smbXcli_req_state);
1872 NTSTATUS status = NT_STATUS_OK;
1873 struct iovec *recv_iov = NULL;
1874 uint8_t *hdr = NULL;
1875 uint8_t wct = 0;
1876 uint32_t vwv_offset = 0;
1877 uint16_t *vwv = NULL;
1878 uint32_t num_bytes = 0;
1879 uint32_t bytes_offset = 0;
1880 uint8_t *bytes = NULL;
1881 size_t i;
1882 bool found_status = false;
1883 bool found_size = false;
1885 if (piov != NULL) {
1886 *piov = NULL;
1888 if (phdr != NULL) {
1889 *phdr = 0;
1891 if (pwct != NULL) {
1892 *pwct = 0;
1894 if (pvwv != NULL) {
1895 *pvwv = NULL;
1897 if (pvwv_offset != NULL) {
1898 *pvwv_offset = 0;
1900 if (pnum_bytes != NULL) {
1901 *pnum_bytes = 0;
1903 if (pbytes != NULL) {
1904 *pbytes = NULL;
1906 if (pbytes_offset != NULL) {
1907 *pbytes_offset = 0;
1909 if (pinbuf != NULL) {
1910 *pinbuf = NULL;
1913 if (state->inbuf != NULL) {
1914 recv_iov = state->smb1.recv_iov;
1915 state->smb1.recv_iov = NULL;
1916 if (state->smb1.recv_cmd != SMBreadBraw) {
1917 hdr = (uint8_t *)recv_iov[0].iov_base;
1918 wct = recv_iov[1].iov_len/2;
1919 vwv = (uint16_t *)recv_iov[1].iov_base;
1920 vwv_offset = PTR_DIFF(vwv, hdr);
1921 num_bytes = recv_iov[2].iov_len;
1922 bytes = (uint8_t *)recv_iov[2].iov_base;
1923 bytes_offset = PTR_DIFF(bytes, hdr);
1927 if (tevent_req_is_nterror(req, &status)) {
1928 for (i=0; i < num_expected; i++) {
1929 if (NT_STATUS_EQUAL(status, expected[i].status)) {
1930 found_status = true;
1931 break;
1935 if (found_status) {
1936 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
1939 return status;
1942 if (num_expected == 0) {
1943 found_status = true;
1944 found_size = true;
1947 status = state->smb1.recv_status;
1949 for (i=0; i < num_expected; i++) {
1950 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
1951 continue;
1954 found_status = true;
1955 if (expected[i].wct == 0) {
1956 found_size = true;
1957 break;
1960 if (expected[i].wct == wct) {
1961 found_size = true;
1962 break;
1966 if (!found_status) {
1967 return status;
1970 if (!found_size) {
1971 return NT_STATUS_INVALID_NETWORK_RESPONSE;
1974 if (piov != NULL) {
1975 *piov = talloc_move(mem_ctx, &recv_iov);
1978 if (phdr != NULL) {
1979 *phdr = hdr;
1981 if (pwct != NULL) {
1982 *pwct = wct;
1984 if (pvwv != NULL) {
1985 *pvwv = vwv;
1987 if (pvwv_offset != NULL) {
1988 *pvwv_offset = vwv_offset;
1990 if (pnum_bytes != NULL) {
1991 *pnum_bytes = num_bytes;
1993 if (pbytes != NULL) {
1994 *pbytes = bytes;
1996 if (pbytes_offset != NULL) {
1997 *pbytes_offset = bytes_offset;
1999 if (pinbuf != NULL) {
2000 *pinbuf = state->inbuf;
2003 return status;
2006 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
2008 size_t wct_ofs;
2009 int i;
2011 wct_ofs = HDR_WCT;
2013 for (i=0; i<num_reqs; i++) {
2014 struct smbXcli_req_state *state;
2015 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2016 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
2017 state->smb1.iov_count-2);
2018 wct_ofs = (wct_ofs + 3) & ~3;
2020 return wct_ofs;
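/*
 * Submit one or more prepared requests as a single SMB1 PDU.
 *
 * With more than one request the requests are sent as an AndX chain: the
 * first one contributes the NBT and SMB header, each follow-up request
 * contributes padding plus its words and bytes, and every non-final request
 * gets the next command and its 4-byte aligned wct offset patched into its
 * AndX vwv. The resulting PDU must fit into the negotiated max_xmit.
 */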
2023 NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
2025 struct smbXcli_req_state *first_state =
2026 tevent_req_data(reqs[0],
2027 struct smbXcli_req_state);
2028 struct smbXcli_req_state *state;
2029 size_t wct_offset;
2030 size_t chain_padding = 0;
2031 int i, iovlen;
2032 struct iovec *iov = NULL;
2033 struct iovec *this_iov;
2034 NTSTATUS status;
2035 size_t nbt_len;
2037 if (num_reqs == 1) {
2038 return smb1cli_req_writev_submit(reqs[0], first_state,
2039 first_state->smb1.iov,
2040 first_state->smb1.iov_count);
2043 iovlen = 0;
2044 for (i=0; i<num_reqs; i++) {
2045 if (!tevent_req_is_in_progress(reqs[i])) {
2046 return NT_STATUS_INTERNAL_ERROR;
2049 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2051 if (state->smb1.iov_count < 4) {
2052 return NT_STATUS_INVALID_PARAMETER_MIX;
2055 if (i == 0) {
2057 * The NBT and SMB header
2059 iovlen += 2;
2060 } else {
2062 * Chain padding
2064 iovlen += 1;
2068 * words and bytes
2070 iovlen += state->smb1.iov_count - 2;
2073 iov = talloc_zero_array(first_state, struct iovec, iovlen);
2074 if (iov == NULL) {
2075 return NT_STATUS_NO_MEMORY;
2078 first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
2079 first_state, reqs, sizeof(*reqs) * num_reqs);
2080 if (first_state->smb1.chained_requests == NULL) {
2081 TALLOC_FREE(iov);
2082 return NT_STATUS_NO_MEMORY;
2085 wct_offset = HDR_WCT;
2086 this_iov = iov;
2088 for (i=0; i<num_reqs; i++) {
2089 size_t next_padding = 0;
2090 uint16_t *vwv;
2092 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2094 if (i < num_reqs-1) {
2095 if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
2096 || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
2097 TALLOC_FREE(iov);
2098 TALLOC_FREE(first_state->smb1.chained_requests);
2099 return NT_STATUS_INVALID_PARAMETER_MIX;
2103 wct_offset += smbXcli_iov_len(state->smb1.iov+2,
2104 state->smb1.iov_count-2) + 1;
2105 if ((wct_offset % 4) != 0) {
2106 next_padding = 4 - (wct_offset % 4);
2108 wct_offset += next_padding;
2109 vwv = state->smb1.vwv;
2111 if (i < num_reqs-1) {
2112 struct smbXcli_req_state *next_state =
2113 tevent_req_data(reqs[i+1],
2114 struct smbXcli_req_state);
2115 SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
2116 SCVAL(vwv+0, 1, 0);
2117 SSVAL(vwv+1, 0, wct_offset);
2118 } else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
2119 /* properly end the chain */
2120 SCVAL(vwv+0, 0, 0xff);
2121 SCVAL(vwv+0, 1, 0xff);
2122 SSVAL(vwv+1, 0, 0);
2125 if (i == 0) {
2127 * The NBT and SMB header
2129 this_iov[0] = state->smb1.iov[0];
2130 this_iov[1] = state->smb1.iov[1];
2131 this_iov += 2;
2132 } else {
2134 * This one is a bit subtle. We have to add
2135 * chain_padding bytes between the requests, and we
2136 * have to also include the wct field of the
2137 * subsequent requests. We use the subsequent header
2138 * for the padding, it contains the wct field in its
2139 * last byte.
2141 this_iov[0].iov_len = chain_padding+1;
2142 this_iov[0].iov_base = (void *)&state->smb1.hdr[
2143 sizeof(state->smb1.hdr) - this_iov[0].iov_len];
2144 memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
2145 this_iov += 1;
2149 * copy the words and bytes
2151 memcpy(this_iov, state->smb1.iov+2,
2152 sizeof(struct iovec) * (state->smb1.iov_count-2));
2153 this_iov += state->smb1.iov_count - 2;
2154 chain_padding = next_padding;
2157 nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
2158 if (nbt_len > first_state->conn->smb1.max_xmit) {
2159 TALLOC_FREE(iov);
2160 TALLOC_FREE(first_state->smb1.chained_requests);
2161 return NT_STATUS_INVALID_PARAMETER_MIX;
2164 status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
2165 if (!NT_STATUS_IS_OK(status)) {
2166 TALLOC_FREE(iov);
2167 TALLOC_FREE(first_state->smb1.chained_requests);
2168 return status;
2171 return NT_STATUS_OK;
2174 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
2176 return ((tevent_queue_length(conn->outgoing) != 0)
2177 || (talloc_array_length(conn->pending) != 0));
2180 uint32_t smb2cli_conn_server_capabilities(struct smbXcli_conn *conn)
2182 return conn->smb2.server.capabilities;
2185 uint16_t smb2cli_conn_server_security_mode(struct smbXcli_conn *conn)
2187 return conn->smb2.server.security_mode;
2190 uint32_t smb2cli_conn_max_trans_size(struct smbXcli_conn *conn)
2192 return conn->smb2.server.max_trans_size;
2195 uint32_t smb2cli_conn_max_read_size(struct smbXcli_conn *conn)
2197 return conn->smb2.server.max_read_size;
2200 uint32_t smb2cli_conn_max_write_size(struct smbXcli_conn *conn)
2202 return conn->smb2.server.max_write_size;
2205 void smb2cli_conn_set_max_credits(struct smbXcli_conn *conn,
2206 uint16_t max_credits)
2208 conn->smb2.max_credits = max_credits;
2211 static void smb2cli_req_cancel_done(struct tevent_req *subreq);
2213 static bool smb2cli_req_cancel(struct tevent_req *req)
2215 struct smbXcli_req_state *state =
2216 tevent_req_data(req,
2217 struct smbXcli_req_state);
2218 uint32_t flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2219 uint32_t pid = IVAL(state->smb2.hdr, SMB2_HDR_PID);
2220 uint32_t tid = IVAL(state->smb2.hdr, SMB2_HDR_TID);
2221 uint64_t mid = BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID);
2222 uint64_t aid = BVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID);
2223 struct smbXcli_session *session = state->session;
2224 uint8_t *fixed = state->smb2.pad;
2225 uint16_t fixed_len = 4;
2226 struct tevent_req *subreq;
2227 struct smbXcli_req_state *substate;
2228 NTSTATUS status;
2230 SSVAL(fixed, 0, 0x04);
2231 SSVAL(fixed, 2, 0);
2233 subreq = smb2cli_req_create(state, state->ev,
2234 state->conn,
2235 SMB2_OP_CANCEL,
2236 flags, 0,
2237 0, /* timeout */
2238 pid, tid, session,
2239 fixed, fixed_len,
2240 NULL, 0);
2241 if (subreq == NULL) {
2242 return false;
2244 substate = tevent_req_data(subreq, struct smbXcli_req_state);
2246 if (flags & SMB2_HDR_FLAG_ASYNC) {
2247 mid = 0;
2250 SIVAL(substate->smb2.hdr, SMB2_HDR_FLAGS, flags);
2251 SIVAL(substate->smb2.hdr, SMB2_HDR_PID, pid);
2252 SIVAL(substate->smb2.hdr, SMB2_HDR_TID, tid);
2253 SBVAL(substate->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
2254 SBVAL(substate->smb2.hdr, SMB2_HDR_ASYNC_ID, aid);
2256 status = smb2cli_req_compound_submit(&subreq, 1);
2257 if (!NT_STATUS_IS_OK(status)) {
2258 TALLOC_FREE(subreq);
2259 return false;
2262 tevent_req_set_callback(subreq, smb2cli_req_cancel_done, NULL);
2264 return true;
2267 static void smb2cli_req_cancel_done(struct tevent_req *subreq)
2269 /* we do not care about the result */
2270 TALLOC_FREE(subreq);
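/*
 * Prepare an SMB2 request without sending it.
 *
 * The fixed 64-byte header is filled in (protocol id, opcode, flags, pid,
 * tid and the session id of the given session); the fixed and dynamic body
 * buffers are only referenced, not copied. Message id and credits are
 * assigned later by smb2cli_req_compound_submit(). SMB2_OP_CANCEL requests
 * are one-way, SMB2_OP_BREAK requests start with UINT64_MAX as a
 * placeholder message id.
 */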
2273 struct tevent_req *smb2cli_req_create(TALLOC_CTX *mem_ctx,
2274 struct tevent_context *ev,
2275 struct smbXcli_conn *conn,
2276 uint16_t cmd,
2277 uint32_t additional_flags,
2278 uint32_t clear_flags,
2279 uint32_t timeout_msec,
2280 uint32_t pid,
2281 uint32_t tid,
2282 struct smbXcli_session *session,
2283 const uint8_t *fixed,
2284 uint16_t fixed_len,
2285 const uint8_t *dyn,
2286 uint32_t dyn_len)
2288 struct tevent_req *req;
2289 struct smbXcli_req_state *state;
2290 uint32_t flags = 0;
2291 uint64_t uid = 0;
2293 req = tevent_req_create(mem_ctx, &state,
2294 struct smbXcli_req_state);
2295 if (req == NULL) {
2296 return NULL;
2299 state->ev = ev;
2300 state->conn = conn;
2301 state->session = session;
2303 if (session) {
2304 uid = session->smb2.session_id;
2307 state->smb2.recv_iov = talloc_zero_array(state, struct iovec, 3);
2308 if (state->smb2.recv_iov == NULL) {
2309 TALLOC_FREE(req);
2310 return NULL;
2313 flags |= additional_flags;
2314 flags &= ~clear_flags;
2316 state->smb2.fixed = fixed;
2317 state->smb2.fixed_len = fixed_len;
2318 state->smb2.dyn = dyn;
2319 state->smb2.dyn_len = dyn_len;
2321 SIVAL(state->smb2.hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2322 SSVAL(state->smb2.hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2323 SSVAL(state->smb2.hdr, SMB2_HDR_OPCODE, cmd);
2324 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, flags);
2325 SIVAL(state->smb2.hdr, SMB2_HDR_PID, pid);
2326 SIVAL(state->smb2.hdr, SMB2_HDR_TID, tid);
2327 SBVAL(state->smb2.hdr, SMB2_HDR_SESSION_ID, uid);
2329 switch (cmd) {
2330 case SMB2_OP_CANCEL:
2331 state->one_way = true;
2332 break;
2333 case SMB2_OP_BREAK:
2335 * If this is a dummy request, it will have
2336 * UINT64_MAX as message id.
2337 * If we send an oplock break acknowledgement,
2338 * this gets overwritten later.
2340 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
2341 break;
2344 if (timeout_msec > 0) {
2345 struct timeval endtime;
2347 endtime = timeval_current_ofs_msec(timeout_msec);
2348 if (!tevent_req_set_endtime(req, ev, endtime)) {
2349 return req;
2353 return req;
2356 void smb2cli_req_set_notify_async(struct tevent_req *req)
2358 struct smbXcli_req_state *state =
2359 tevent_req_data(req,
2360 struct smbXcli_req_state);
2362 state->smb2.notify_async = true;
2365 static void smb2cli_req_writev_done(struct tevent_req *subreq);
2366 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2367 TALLOC_CTX *tmp_mem,
2368 uint8_t *inbuf);
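/*
 * Send one or more prepared SMB2 requests as a single compound PDU.
 *
 * For every request the credit charge is computed (per 64KiB of dynamic
 * data when SMB2_CAP_LARGE_MTU was negotiated, 1 otherwise), a message id
 * range is consumed, the header/fixed/dyn buffers are added to the iovec
 * with 8-byte padding between compounded requests, and the PDU is signed
 * with the session's signing key when required. Everything goes out in a
 * single writev on the outgoing queue.
 */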
2370 NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
2371 int num_reqs)
2373 struct smbXcli_req_state *state;
2374 struct tevent_req *subreq;
2375 struct iovec *iov;
2376 int i, num_iov, nbt_len;
2379 * 1 for the nbt length
2380 * per request: HDR, fixed, dyn, padding
2381 * -1 because the last one does not need padding
2384 iov = talloc_array(reqs[0], struct iovec, 1 + 4*num_reqs - 1);
2385 if (iov == NULL) {
2386 return NT_STATUS_NO_MEMORY;
2389 num_iov = 1;
2390 nbt_len = 0;
2392 for (i=0; i<num_reqs; i++) {
2393 int hdr_iov;
2394 size_t reqlen;
2395 bool ret;
2396 uint16_t opcode;
2397 uint64_t avail;
2398 uint16_t charge;
2399 uint16_t credits;
2400 uint64_t mid;
2401 bool should_sign = false;
2403 if (!tevent_req_is_in_progress(reqs[i])) {
2404 return NT_STATUS_INTERNAL_ERROR;
2407 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
2409 if (!smbXcli_conn_is_connected(state->conn)) {
2410 return NT_STATUS_CONNECTION_DISCONNECTED;
2413 if ((state->conn->protocol != PROTOCOL_NONE) &&
2414 (state->conn->protocol < PROTOCOL_SMB2_02)) {
2415 return NT_STATUS_REVISION_MISMATCH;
2418 opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2419 if (opcode == SMB2_OP_CANCEL) {
2420 goto skip_credits;
2423 avail = UINT64_MAX - state->conn->smb2.mid;
2424 if (avail < 1) {
2425 return NT_STATUS_CONNECTION_ABORTED;
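/*
* Credit charge: if the server supports large MTU, a request costs
* one credit per started 64KiB of dynamic payload, i.e.
* 1 + (MAX(dyn_len, 1) - 1) / 65536; otherwise every request costs
* exactly one credit.
*/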
2428 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2429 charge = (MAX(state->smb2.dyn_len, 1) - 1)/ 65536 + 1;
2430 } else {
2431 charge = 1;
2434 charge = MAX(state->smb2.credit_charge, charge);
2436 avail = MIN(avail, state->conn->smb2.cur_credits);
2437 if (avail < charge) {
2438 return NT_STATUS_INTERNAL_ERROR;
2441 credits = 0;
2442 if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
2443 credits = state->conn->smb2.max_credits -
2444 state->conn->smb2.cur_credits;
2446 if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
2447 credits += 1;
2450 mid = state->conn->smb2.mid;
2451 state->conn->smb2.mid += charge;
2452 state->conn->smb2.cur_credits -= charge;
2454 if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
2455 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
2457 SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
2458 SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
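/*
* The message id window advances by the full charge, and the credit
* field asks the server for enough credits to bring our window back
* up towards max_credits.
*/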
2460 skip_credits:
2461 hdr_iov = num_iov;
2462 iov[num_iov].iov_base = state->smb2.hdr;
2463 iov[num_iov].iov_len = sizeof(state->smb2.hdr);
2464 num_iov += 1;
2466 iov[num_iov].iov_base = discard_const(state->smb2.fixed);
2467 iov[num_iov].iov_len = state->smb2.fixed_len;
2468 num_iov += 1;
2470 if (state->smb2.dyn != NULL) {
2471 iov[num_iov].iov_base = discard_const(state->smb2.dyn);
2472 iov[num_iov].iov_len = state->smb2.dyn_len;
2473 num_iov += 1;
2476 reqlen = sizeof(state->smb2.hdr);
2477 reqlen += state->smb2.fixed_len;
2478 reqlen += state->smb2.dyn_len;
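/*
* For compounded requests every PDU except the last one is padded
* to an 8 byte boundary and its NextCommand field is set to the
* padded length, so the next SMB2 header follows directly.
*/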
2480 if (i < num_reqs-1) {
2481 if ((reqlen % 8) > 0) {
2482 uint8_t pad = 8 - (reqlen % 8);
2483 iov[num_iov].iov_base = state->smb2.pad;
2484 iov[num_iov].iov_len = pad;
2485 num_iov += 1;
2486 reqlen += pad;
2488 SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
2490 nbt_len += reqlen;
2492 if (state->session) {
2493 should_sign = state->session->smb2.should_sign;
2494 if (state->session->smb2.channel_setup) {
2495 should_sign = true;
2499 if (should_sign) {
2500 NTSTATUS status;
2502 status = smb2_signing_sign_pdu(state->session->smb2.signing_key,
2503 &iov[hdr_iov], num_iov - hdr_iov);
2504 if (!NT_STATUS_IS_OK(status)) {
2505 return status;
2509 ret = smbXcli_req_set_pending(reqs[i]);
2510 if (!ret) {
2511 return NT_STATUS_NO_MEMORY;
2515 state = tevent_req_data(reqs[0], struct smbXcli_req_state);
2516 _smb_setlen_tcp(state->length_hdr, nbt_len);
2517 iov[0].iov_base = state->length_hdr;
2518 iov[0].iov_len = sizeof(state->length_hdr);
2520 if (state->conn->dispatch_incoming == NULL) {
2521 state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
2524 subreq = writev_send(state, state->ev, state->conn->outgoing,
2525 state->conn->write_fd, false, iov, num_iov);
2526 if (subreq == NULL) {
2527 return NT_STATUS_NO_MEMORY;
2529 tevent_req_set_callback(subreq, smb2cli_req_writev_done, reqs[0]);
2530 return NT_STATUS_OK;
2533 void smb2cli_req_set_credit_charge(struct tevent_req *req, uint16_t charge)
2535 struct smbXcli_req_state *state =
2536 tevent_req_data(req,
2537 struct smbXcli_req_state);
2539 state->smb2.credit_charge = charge;
2542 struct tevent_req *smb2cli_req_send(TALLOC_CTX *mem_ctx,
2543 struct tevent_context *ev,
2544 struct smbXcli_conn *conn,
2545 uint16_t cmd,
2546 uint32_t additional_flags,
2547 uint32_t clear_flags,
2548 uint32_t timeout_msec,
2549 uint32_t pid,
2550 uint32_t tid,
2551 struct smbXcli_session *session,
2552 const uint8_t *fixed,
2553 uint16_t fixed_len,
2554 const uint8_t *dyn,
2555 uint32_t dyn_len)
2557 struct tevent_req *req;
2558 NTSTATUS status;
2560 req = smb2cli_req_create(mem_ctx, ev, conn, cmd,
2561 additional_flags, clear_flags,
2562 timeout_msec,
2563 pid, tid, session,
2564 fixed, fixed_len, dyn, dyn_len);
2565 if (req == NULL) {
2566 return NULL;
2568 if (!tevent_req_is_in_progress(req)) {
2569 return tevent_req_post(req, ev);
2571 status = smb2cli_req_compound_submit(&req, 1);
2572 if (tevent_req_nterror(req, status)) {
2573 return tevent_req_post(req, ev);
2575 return req;
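/*
* Rough caller sketch (illustrative names only), assuming an
* established connection and event context; the SMB2 LOGOFF request
* has a 4 byte fixed body:
*
*   subreq = smb2cli_req_send(state, ev, conn, SMB2_OP_LOGOFF,
*                             0, 0, timeout_msec,
*                             0, 0, session,
*                             fixed, 4, NULL, 0);
*   if (tevent_req_nomem(subreq, req)) {
*           return tevent_req_post(req, ev);
*   }
*   tevent_req_set_callback(subreq, my_logoff_done, req);
*
* and in my_logoff_done():
*
*   status = smb2cli_req_recv(subreq, state, &iov,
*                             expected, ARRAY_SIZE(expected));
*/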
2578 static void smb2cli_req_writev_done(struct tevent_req *subreq)
2580 struct tevent_req *req =
2581 tevent_req_callback_data(subreq,
2582 struct tevent_req);
2583 struct smbXcli_req_state *state =
2584 tevent_req_data(req,
2585 struct smbXcli_req_state);
2586 ssize_t nwritten;
2587 int err;
2589 nwritten = writev_recv(subreq, &err);
2590 TALLOC_FREE(subreq);
2591 if (nwritten == -1) {
2592 /* here, we need to notify all pending requests */
2593 NTSTATUS status = map_nt_error_from_unix_common(err);
2594 smbXcli_conn_disconnect(state->conn, status);
2595 return;
2599 static NTSTATUS smb2cli_inbuf_parse_compound(uint8_t *buf, TALLOC_CTX *mem_ctx,
2600 struct iovec **piov, int *pnum_iov)
2602 struct iovec *iov;
2603 int num_iov;
2604 size_t buflen;
2605 size_t taken;
2606 uint8_t *first_hdr;
2608 num_iov = 0;
2610 iov = talloc_array(mem_ctx, struct iovec, num_iov);
2611 if (iov == NULL) {
2612 return NT_STATUS_NO_MEMORY;
2615 buflen = smb_len_tcp(buf);
2616 taken = 0;
2617 first_hdr = buf + NBT_HDR_SIZE;
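/*
* Split each PDU of the (possibly compounded) payload into three
* iovecs: the 64 byte header, the fixed body and whatever dynamic
* bytes remain up to NextCommand (or the end of the buffer).
*/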
2619 while (taken < buflen) {
2620 size_t len = buflen - taken;
2621 uint8_t *hdr = first_hdr + taken;
2622 struct iovec *cur;
2623 size_t full_size;
2624 size_t next_command_ofs;
2625 uint16_t body_size;
2626 struct iovec *iov_tmp;
2629 * We need the header plus the body length field
2632 if (len < SMB2_HDR_BODY + 2) {
2633 DEBUG(10, ("%d bytes left, expected at least %d\n",
2634 (int)len, SMB2_HDR_BODY + 2));
2635 goto inval;
2637 if (IVAL(hdr, 0) != SMB2_MAGIC) {
2638 DEBUG(10, ("Got non-SMB2 PDU: %x\n",
2639 IVAL(hdr, 0)));
2640 goto inval;
2642 if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
2643 DEBUG(10, ("Got HDR len %d, expected %d\n",
2644 SVAL(hdr, 4), SMB2_HDR_BODY));
2645 goto inval;
2648 full_size = len;
2649 next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
2650 body_size = SVAL(hdr, SMB2_HDR_BODY);
2652 if (next_command_ofs != 0) {
2653 if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
2654 goto inval;
2656 if (next_command_ofs > full_size) {
2657 goto inval;
2659 full_size = next_command_ofs;
2661 if (body_size < 2) {
2662 goto inval;
2664 body_size &= 0xfffe;
2666 if (body_size > (full_size - SMB2_HDR_BODY)) {
2667 goto inval;
2670 iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
2671 num_iov + 3);
2672 if (iov_tmp == NULL) {
2673 TALLOC_FREE(iov);
2674 return NT_STATUS_NO_MEMORY;
2676 iov = iov_tmp;
2677 cur = &iov[num_iov];
2678 num_iov += 3;
2680 cur[0].iov_base = hdr;
2681 cur[0].iov_len = SMB2_HDR_BODY;
2682 cur[1].iov_base = hdr + SMB2_HDR_BODY;
2683 cur[1].iov_len = body_size;
2684 cur[2].iov_base = hdr + SMB2_HDR_BODY + body_size;
2685 cur[2].iov_len = full_size - (SMB2_HDR_BODY + body_size);
2687 taken += full_size;
2690 *piov = iov;
2691 *pnum_iov = num_iov;
2692 return NT_STATUS_OK;
2694 inval:
2695 TALLOC_FREE(iov);
2696 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2699 static struct tevent_req *smb2cli_conn_find_pending(struct smbXcli_conn *conn,
2700 uint64_t mid)
2702 size_t num_pending = talloc_array_length(conn->pending);
2703 size_t i;
2705 for (i=0; i<num_pending; i++) {
2706 struct tevent_req *req = conn->pending[i];
2707 struct smbXcli_req_state *state =
2708 tevent_req_data(req,
2709 struct smbXcli_req_state);
2711 if (mid == BVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID)) {
2712 return req;
2715 return NULL;
2718 static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
2719 TALLOC_CTX *tmp_mem,
2720 uint8_t *inbuf)
2722 struct tevent_req *req;
2723 struct smbXcli_req_state *state = NULL;
2724 struct iovec *iov;
2725 int i, num_iov;
2726 NTSTATUS status;
2727 bool defer = true;
2728 struct smbXcli_session *last_session = NULL;
2730 status = smb2cli_inbuf_parse_compound(inbuf, tmp_mem,
2731 &iov, &num_iov);
2732 if (!NT_STATUS_IS_OK(status)) {
2733 return status;
2736 for (i=0; i<num_iov; i+=3) {
2737 uint8_t *inbuf_ref = NULL;
2738 struct iovec *cur = &iov[i];
2739 uint8_t *inhdr = (uint8_t *)cur[0].iov_base;
2740 uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2741 uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2742 uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2743 uint16_t req_opcode;
2744 uint32_t req_flags;
2745 uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT);
2746 uint32_t new_credits;
2747 struct smbXcli_session *session = NULL;
2748 const DATA_BLOB *signing_key = NULL;
2749 bool should_sign = false;
2751 new_credits = conn->smb2.cur_credits;
2752 new_credits += credits;
2753 if (new_credits > UINT16_MAX) {
2754 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2756 conn->smb2.cur_credits += credits;
2758 req = smb2cli_conn_find_pending(conn, mid);
2759 if (req == NULL) {
2760 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2762 state = tevent_req_data(req, struct smbXcli_req_state);
2764 state->smb2.got_async = false;
2766 req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
2767 if (opcode != req_opcode) {
2768 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2770 req_flags = IVAL(state->smb2.hdr, SMB2_HDR_FLAGS);
2772 if (!(flags & SMB2_HDR_FLAG_REDIRECT)) {
2773 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2776 status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS));
2777 if ((flags & SMB2_HDR_FLAG_ASYNC) &&
2778 NT_STATUS_EQUAL(status, STATUS_PENDING)) {
2779 uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID);
2782 * async interim responses are not signed,
2783 * even if the SMB2_HDR_FLAG_SIGNED flag
2784 * is set.
2786 req_flags |= SMB2_HDR_FLAG_ASYNC;
2787 SIVAL(state->smb2.hdr, SMB2_HDR_FLAGS, req_flags);
2788 SBVAL(state->smb2.hdr, SMB2_HDR_ASYNC_ID, async_id);
2790 if (state->smb2.notify_async) {
2791 state->smb2.got_async = true;
2792 tevent_req_defer_callback(req, state->ev);
2793 tevent_req_notify_callback(req);
2795 continue;
2798 session = state->session;
2799 if (req_flags & SMB2_HDR_FLAG_CHAINED) {
2800 session = last_session;
2802 last_session = session;
2804 if (session) {
2805 should_sign = session->smb2.should_sign;
2806 if (session->smb2.channel_setup) {
2807 should_sign = true;
2811 if (should_sign) {
2812 if (!(flags & SMB2_HDR_FLAG_SIGNED)) {
2813 return NT_STATUS_ACCESS_DENIED;
2817 if (flags & SMB2_HDR_FLAG_SIGNED) {
2818 uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID);
2820 if (session == NULL) {
2821 struct smbXcli_session *s;
2823 s = state->conn->sessions;
2824 for (; s; s = s->next) {
2825 if (s->smb2.session_id != uid) {
2826 continue;
2829 session = s;
2830 break;
2834 if (session == NULL) {
2835 return NT_STATUS_INVALID_NETWORK_RESPONSE;
2838 last_session = session;
2839 signing_key = &session->smb2.signing_key;
2842 if ((opcode == SMB2_OP_SESSSETUP) &&
2843 NT_STATUS_IS_OK(status)) {
2845 * the caller has to check the signing
2846 * as only the caller knows the correct
2847 * session key
2849 signing_key = NULL;
2852 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
2854 * if the server returns NT_STATUS_USER_SESSION_DELETED
2855 * the response is not signed and we should
2856 * propagate the NT_STATUS_USER_SESSION_DELETED
2857 * status to the caller.
2859 if (signing_key) {
2860 signing_key = NULL;
2864 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) ||
2865 NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) ||
2866 NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) {
2868 * if the server returns
2869 * NT_STATUS_NETWORK_NAME_DELETED
2870 * NT_STATUS_FILE_CLOSED
2871 * NT_STATUS_INVALID_PARAMETER
2872 * the response might not be signed
2873 * as this happens before the signing checks.
2875 * If the server echoes the signature (or sends all zeros)
2876 * we should report the status from the server
2877 * to the caller.
2879 if (signing_key) {
2880 int cmp;
2882 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2883 state->smb2.hdr+SMB2_HDR_SIGNATURE,
2884 16);
2885 if (cmp == 0) {
2886 state->smb2.signing_skipped = true;
2887 signing_key = NULL;
2890 if (signing_key) {
2891 int cmp;
2892 static const uint8_t zeros[16];
2894 cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE,
2895 zeros,
2896 16);
2897 if (cmp == 0) {
2898 state->smb2.signing_skipped = true;
2899 signing_key = NULL;
2904 if (signing_key) {
2905 status = smb2_signing_check_pdu(*signing_key, cur, 3);
2906 if (!NT_STATUS_IS_OK(status)) {
2908 * If the signing check fails, we disconnect
2909 * the connection.
2911 return status;
2915 smbXcli_req_unset_pending(req);
2918 * There might be more than one response,
2919 * so we need to defer the notifications.
2921 if ((num_iov == 3) && (talloc_array_length(conn->pending) == 0)) {
2922 defer = false;
2925 if (defer) {
2926 tevent_req_defer_callback(req, state->ev);
2930 * Note: here we use talloc_reference() in a way
2931 * that does not expose it to the caller.
2933 inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf);
2934 if (tevent_req_nomem(inbuf_ref, req)) {
2935 continue;
2938 /* copy the related buffers */
2939 state->smb2.recv_iov[0] = cur[0];
2940 state->smb2.recv_iov[1] = cur[1];
2941 state->smb2.recv_iov[2] = cur[2];
2943 tevent_req_done(req);
2946 if (defer) {
2947 return NT_STATUS_RETRY;
2950 return NT_STATUS_OK;
2953 NTSTATUS smb2cli_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2954 struct iovec **piov,
2955 const struct smb2cli_req_expected_response *expected,
2956 size_t num_expected)
2958 struct smbXcli_req_state *state =
2959 tevent_req_data(req,
2960 struct smbXcli_req_state);
2961 NTSTATUS status;
2962 size_t body_size;
2963 bool found_status = false;
2964 bool found_size = false;
2965 size_t i;
2967 if (piov != NULL) {
2968 *piov = NULL;
2971 if (state->smb2.got_async) {
2972 return STATUS_PENDING;
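/*
* If the request failed at the transport/dispatch level, that error
* must not be mistaken for one of the caller's expected response
* codes; such a collision is remapped to
* NT_STATUS_UNEXPECTED_NETWORK_ERROR below.
*/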
2975 if (tevent_req_is_nterror(req, &status)) {
2976 for (i=0; i < num_expected; i++) {
2977 if (NT_STATUS_EQUAL(status, expected[i].status)) {
2978 found_status = true;
2979 break;
2983 if (found_status) {
2984 return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
2987 return status;
2990 if (num_expected == 0) {
2991 found_status = true;
2992 found_size = true;
2995 status = NT_STATUS(IVAL(state->smb2.recv_iov[0].iov_base, SMB2_HDR_STATUS));
2996 body_size = SVAL(state->smb2.recv_iov[1].iov_base, 0);
2998 for (i=0; i < num_expected; i++) {
2999 if (!NT_STATUS_EQUAL(status, expected[i].status)) {
3000 continue;
3003 found_status = true;
3004 if (expected[i].body_size == 0) {
3005 found_size = true;
3006 break;
3009 if (expected[i].body_size == body_size) {
3010 found_size = true;
3011 break;
3015 if (!found_status) {
3016 return status;
3019 if (state->smb2.signing_skipped) {
3020 if (num_expected > 0) {
3021 return NT_STATUS_ACCESS_DENIED;
3023 if (!NT_STATUS_IS_ERR(status)) {
3024 return NT_STATUS_ACCESS_DENIED;
3028 if (!found_size) {
3029 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3032 if (piov != NULL) {
3033 *piov = talloc_move(mem_ctx, &state->smb2.recv_iov);
3036 return status;
3039 static const struct {
3040 enum protocol_types proto;
3041 const char *smb1_name;
3042 } smb1cli_prots[] = {
3043 {PROTOCOL_CORE, "PC NETWORK PROGRAM 1.0"},
3044 {PROTOCOL_COREPLUS, "MICROSOFT NETWORKS 1.03"},
3045 {PROTOCOL_LANMAN1, "MICROSOFT NETWORKS 3.0"},
3046 {PROTOCOL_LANMAN1, "LANMAN1.0"},
3047 {PROTOCOL_LANMAN2, "LM1.2X002"},
3048 {PROTOCOL_LANMAN2, "DOS LANMAN2.1"},
3049 {PROTOCOL_LANMAN2, "LANMAN2.1"},
3050 {PROTOCOL_LANMAN2, "Samba"},
3051 {PROTOCOL_NT1, "NT LANMAN 1.0"},
3052 {PROTOCOL_NT1, "NT LM 0.12"},
3053 {PROTOCOL_SMB2_02, "SMB 2.002"},
3054 {PROTOCOL_SMB2_10, "SMB 2.???"},
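/*
* "SMB 2.002" negotiates SMB 2.0 directly, while "SMB 2.???" is the
* wildcard dialect: a server that selects it answers with an SMB2
* NEGOTIATE carrying dialect 0x02FF, and the client then redoes the
* negotiation natively over SMB2 (see smbXcli_negprot_smb2_done()).
*/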
3057 static const struct {
3058 enum protocol_types proto;
3059 uint16_t smb2_dialect;
3060 } smb2cli_prots[] = {
3061 {PROTOCOL_SMB2_02, SMB2_DIALECT_REVISION_202},
3062 {PROTOCOL_SMB2_10, SMB2_DIALECT_REVISION_210},
3063 {PROTOCOL_SMB2_22, SMB2_DIALECT_REVISION_222},
3066 struct smbXcli_negprot_state {
3067 struct smbXcli_conn *conn;
3068 struct tevent_context *ev;
3069 uint32_t timeout_msec;
3070 enum protocol_types min_protocol;
3071 enum protocol_types max_protocol;
3073 struct {
3074 uint8_t fixed[36];
3075 uint8_t dyn[ARRAY_SIZE(smb2cli_prots)*2];
3076 } smb2;
3079 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq);
3080 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state);
3081 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq);
3082 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state);
3083 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq);
3084 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3085 TALLOC_CTX *frame,
3086 uint8_t *inbuf);
3088 struct tevent_req *smbXcli_negprot_send(TALLOC_CTX *mem_ctx,
3089 struct tevent_context *ev,
3090 struct smbXcli_conn *conn,
3091 uint32_t timeout_msec,
3092 enum protocol_types min_protocol,
3093 enum protocol_types max_protocol)
3095 struct tevent_req *req, *subreq;
3096 struct smbXcli_negprot_state *state;
3098 req = tevent_req_create(mem_ctx, &state,
3099 struct smbXcli_negprot_state);
3100 if (req == NULL) {
3101 return NULL;
3103 state->conn = conn;
3104 state->ev = ev;
3105 state->timeout_msec = timeout_msec;
3106 state->min_protocol = min_protocol;
3107 state->max_protocol = max_protocol;
3109 if (min_protocol == PROTOCOL_NONE) {
3110 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3111 return tevent_req_post(req, ev);
3114 if (max_protocol == PROTOCOL_NONE) {
3115 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3116 return tevent_req_post(req, ev);
3119 if (min_protocol > max_protocol) {
3120 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
3121 return tevent_req_post(req, ev);
3124 if ((min_protocol < PROTOCOL_SMB2_02) &&
3125 (max_protocol < PROTOCOL_SMB2_02)) {
3127 * SMB1 only...
3129 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3131 subreq = smbXcli_negprot_smb1_subreq(state);
3132 if (tevent_req_nomem(subreq, req)) {
3133 return tevent_req_post(req, ev);
3135 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3136 return req;
3139 if ((min_protocol >= PROTOCOL_SMB2_02) &&
3140 (max_protocol >= PROTOCOL_SMB2_02)) {
3142 * SMB2 only...
3144 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3146 subreq = smbXcli_negprot_smb2_subreq(state);
3147 if (tevent_req_nomem(subreq, req)) {
3148 return tevent_req_post(req, ev);
3150 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3151 return req;
3155 * We send an SMB1 negprot with the SMB2 dialects
3156 * and expect either an SMB1 or an SMB2 response.
3158 * smbXcli_negprot_dispatch_incoming() will fix the
3159 * callback to match the protocol of the response.
3161 conn->dispatch_incoming = smbXcli_negprot_dispatch_incoming;
3163 subreq = smbXcli_negprot_smb1_subreq(state);
3164 if (tevent_req_nomem(subreq, req)) {
3165 return tevent_req_post(req, ev);
3167 tevent_req_set_callback(subreq, smbXcli_negprot_invalid_done, req);
3168 return req;
3171 static void smbXcli_negprot_invalid_done(struct tevent_req *subreq)
3173 struct tevent_req *req =
3174 tevent_req_callback_data(subreq,
3175 struct tevent_req);
3176 NTSTATUS status;
3179 * we just want the low level error
3181 status = tevent_req_simple_recv_ntstatus(subreq);
3182 TALLOC_FREE(subreq);
3183 if (tevent_req_nterror(req, status)) {
3184 return;
3187 /* this should never happen */
3188 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
3191 static struct tevent_req *smbXcli_negprot_smb1_subreq(struct smbXcli_negprot_state *state)
3193 size_t i;
3194 DATA_BLOB bytes = data_blob_null;
3195 uint8_t flags;
3196 uint16_t flags2;
3198 /* setup the protocol strings */
3199 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3200 uint8_t c = 2;
3201 bool ok;
3203 if (smb1cli_prots[i].proto < state->min_protocol) {
3204 continue;
3207 if (smb1cli_prots[i].proto > state->max_protocol) {
3208 continue;
3211 ok = data_blob_append(state, &bytes, &c, sizeof(c));
3212 if (!ok) {
3213 return NULL;
3217 * We know it is already ASCII and
3218 * we want NULL termination.
3220 ok = data_blob_append(state, &bytes,
3221 smb1cli_prots[i].smb1_name,
3222 strlen(smb1cli_prots[i].smb1_name)+1);
3223 if (!ok) {
3224 return NULL;
3228 smb1cli_req_flags(state->max_protocol,
3229 state->conn->smb1.client.capabilities,
3230 SMBnegprot,
3231 0, 0, &flags,
3232 0, 0, &flags2);
3234 return smb1cli_req_send(state, state->ev, state->conn,
3235 SMBnegprot,
3236 flags, ~flags,
3237 flags2, ~flags2,
3238 state->timeout_msec,
3239 0xFFFE, 0, 0, /* pid, tid, uid */
3240 0, NULL, /* wct, vwv */
3241 bytes.length, bytes.data);
3244 static void smbXcli_negprot_smb1_done(struct tevent_req *subreq)
3246 struct tevent_req *req =
3247 tevent_req_callback_data(subreq,
3248 struct tevent_req);
3249 struct smbXcli_negprot_state *state =
3250 tevent_req_data(req,
3251 struct smbXcli_negprot_state);
3252 struct smbXcli_conn *conn = state->conn;
3253 struct iovec *recv_iov = NULL;
3254 uint8_t *inhdr;
3255 uint8_t wct;
3256 uint16_t *vwv;
3257 uint32_t num_bytes;
3258 uint8_t *bytes;
3259 NTSTATUS status;
3260 uint16_t protnum;
3261 size_t i;
3262 size_t num_prots = 0;
3263 uint8_t flags;
3264 uint32_t client_capabilities = conn->smb1.client.capabilities;
3265 uint32_t both_capabilities;
3266 uint32_t server_capabilities = 0;
3267 uint32_t capabilities;
3268 uint32_t client_max_xmit = conn->smb1.client.max_xmit;
3269 uint32_t server_max_xmit = 0;
3270 uint32_t max_xmit;
3271 uint32_t server_max_mux = 0;
3272 uint16_t server_security_mode = 0;
3273 uint32_t server_session_key = 0;
3274 bool server_readbraw = false;
3275 bool server_writebraw = false;
3276 bool server_lockread = false;
3277 bool server_writeunlock = false;
3278 struct GUID server_guid = GUID_zero();
3279 DATA_BLOB server_gss_blob = data_blob_null;
3280 uint8_t server_challenge[8];
3281 char *server_workgroup = NULL;
3282 char *server_name = NULL;
3283 int server_time_zone = 0;
3284 NTTIME server_system_time = 0;
3285 static const struct smb1cli_req_expected_response expected[] = {
3287 .status = NT_STATUS_OK,
3288 .wct = 0x11, /* NT1 */
3291 .status = NT_STATUS_OK,
3292 .wct = 0x0D, /* LM */
3295 .status = NT_STATUS_OK,
3296 .wct = 0x01, /* CORE */
3300 ZERO_STRUCT(server_challenge);
3302 status = smb1cli_req_recv(subreq, state,
3303 &recv_iov,
3304 &inhdr,
3305 &wct,
3306 &vwv,
3307 NULL, /* pvwv_offset */
3308 &num_bytes,
3309 &bytes,
3310 NULL, /* pbytes_offset */
3311 NULL, /* pinbuf */
3312 expected, ARRAY_SIZE(expected));
3313 TALLOC_FREE(subreq);
3314 if (tevent_req_nterror(req, status)) {
3315 return;
3318 flags = CVAL(inhdr, HDR_FLG);
3320 protnum = SVAL(vwv, 0);
3322 for (i=0; i < ARRAY_SIZE(smb1cli_prots); i++) {
3323 if (smb1cli_prots[i].proto < state->min_protocol) {
3324 continue;
3327 if (smb1cli_prots[i].proto > state->max_protocol) {
3328 continue;
3331 if (protnum != num_prots) {
3332 num_prots++;
3333 continue;
3336 conn->protocol = smb1cli_prots[i].proto;
3337 break;
3340 if (conn->protocol == PROTOCOL_NONE) {
3341 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3342 return;
3345 if ((conn->protocol < PROTOCOL_NT1) && conn->mandatory_signing) {
3346 DEBUG(0,("smbXcli_negprot: SMB signing is mandatory "
3347 "and the selected protocol level doesn't support it.\n"));
3348 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3349 return;
3352 if (flags & FLAG_SUPPORT_LOCKREAD) {
3353 server_lockread = true;
3354 server_writeunlock = true;
3357 if (conn->protocol >= PROTOCOL_NT1) {
3358 const char *client_signing = NULL;
3359 bool server_mandatory = false;
3360 bool server_allowed = false;
3361 const char *server_signing = NULL;
3362 bool ok;
3363 uint8_t key_len;
3365 if (wct != 0x11) {
3366 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3367 return;
3370 /* NT protocol */
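/*
* The NT1 negprot reply words are not 2-byte aligned beyond the
* single-byte SecurityMode field, hence the extra one-byte offset
* (the trailing ", 1") in the reads below.
*/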
3371 server_security_mode = CVAL(vwv + 1, 0);
3372 server_max_mux = SVAL(vwv + 1, 1);
3373 server_max_xmit = IVAL(vwv + 3, 1);
3374 server_session_key = IVAL(vwv + 7, 1);
3375 server_time_zone = SVALS(vwv + 15, 1);
3376 server_time_zone *= 60;
3377 /* this time arrives in real GMT */
3378 server_system_time = BVAL(vwv + 11, 1);
3379 server_capabilities = IVAL(vwv + 9, 1);
3381 key_len = CVAL(vwv + 16, 1);
3383 if (server_capabilities & CAP_RAW_MODE) {
3384 server_readbraw = true;
3385 server_writebraw = true;
3387 if (server_capabilities & CAP_LOCK_AND_READ) {
3388 server_lockread = true;
3391 if (server_capabilities & CAP_EXTENDED_SECURITY) {
3392 DATA_BLOB blob1, blob2;
3394 if (num_bytes < 16) {
3395 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3396 return;
3399 blob1 = data_blob_const(bytes, 16);
3400 status = GUID_from_data_blob(&blob1, &server_guid);
3401 if (tevent_req_nterror(req, status)) {
3402 return;
3405 blob1 = data_blob_const(bytes+16, num_bytes-16);
3406 blob2 = data_blob_dup_talloc(state, blob1);
3407 if (blob1.length > 0 &&
3408 tevent_req_nomem(blob2.data, req)) {
3409 return;
3411 server_gss_blob = blob2;
3412 } else {
3413 DATA_BLOB blob1, blob2;
3415 if (num_bytes < key_len) {
3416 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3417 return;
3420 if (key_len != 0 && key_len != 8) {
3421 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3422 return;
3425 if (key_len == 8) {
3426 memcpy(server_challenge, bytes, 8);
3429 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3430 blob2 = data_blob_const(bytes+key_len, num_bytes-key_len);
3431 if (blob1.length > 0) {
3432 size_t len;
3434 len = utf16_len_n(blob1.data,
3435 blob1.length);
3436 blob1.length = len;
3438 ok = convert_string_talloc(state,
3439 CH_UTF16LE,
3440 CH_UNIX,
3441 blob1.data,
3442 blob1.length,
3443 &server_workgroup,
3444 &len);
3445 if (!ok) {
3446 status = map_nt_error_from_unix_common(errno);
3447 tevent_req_nterror(req, status);
3448 return;
3452 blob2.data += blob1.length;
3453 blob2.length -= blob1.length;
3454 if (blob2.length > 0) {
3455 size_t len;
3457 len = utf16_len_n(blob2.data,
3458 blob2.length);
3459 blob2.length = len;
3461 ok = convert_string_talloc(state,
3462 CH_UTF16LE,
3463 CH_UNIX,
3464 blob2.data,
3465 blob2.length,
3466 &server_name,
3467 &len);
3468 if (!ok) {
3469 status = map_nt_error_from_unix_common(errno);
3470 tevent_req_nterror(req, status);
3471 return;
3476 client_signing = "disabled";
3477 if (conn->allow_signing) {
3478 client_signing = "allowed";
3480 if (conn->mandatory_signing) {
3481 client_signing = "required";
3484 server_signing = "not supported";
3485 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLED) {
3486 server_signing = "supported";
3487 server_allowed = true;
3489 if (server_security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRED) {
3490 server_signing = "required";
3491 server_mandatory = true;
3494 ok = smb_signing_set_negotiated(conn->smb1.signing,
3495 server_allowed,
3496 server_mandatory);
3497 if (!ok) {
3498 DEBUG(1,("cli_negprot: SMB signing is required, "
3499 "but client[%s] and server[%s] mismatch\n",
3500 client_signing, server_signing));
3501 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
3502 return;
3505 } else if (conn->protocol >= PROTOCOL_LANMAN1) {
3506 DATA_BLOB blob1;
3507 uint8_t key_len;
3508 time_t t;
3510 if (wct != 0x0D) {
3511 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3512 return;
3515 server_security_mode = SVAL(vwv + 1, 0);
3516 server_max_xmit = SVAL(vwv + 2, 0);
3517 server_max_mux = SVAL(vwv + 3, 0);
3518 server_readbraw = ((SVAL(vwv + 5, 0) & 0x1) != 0);
3519 server_writebraw = ((SVAL(vwv + 5, 0) & 0x2) != 0);
3520 server_session_key = IVAL(vwv + 6, 0);
3521 server_time_zone = SVALS(vwv + 10, 0);
3522 server_time_zone *= 60;
3523 /* this time is converted to GMT by make_unix_date */
3524 t = pull_dos_date((const uint8_t *)(vwv + 8), server_time_zone);
3525 unix_to_nt_time(&server_system_time, t);
3526 key_len = SVAL(vwv + 11, 0);
3528 if (num_bytes < key_len) {
3529 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3530 return;
3533 if (key_len != 0 && key_len != 8) {
3534 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3535 return;
3538 if (key_len == 8) {
3539 memcpy(server_challenge, bytes, 8);
3542 blob1 = data_blob_const(bytes+key_len, num_bytes-key_len);
3543 if (blob1.length > 0) {
3544 size_t len;
3545 bool ok;
3547 len = utf16_len_n(blob1.data,
3548 blob1.length);
3549 blob1.length = len;
3551 ok = convert_string_talloc(state,
3552 CH_DOS,
3553 CH_UNIX,
3554 blob1.data,
3555 blob1.length,
3556 &server_workgroup,
3557 &len);
3558 if (!ok) {
3559 status = map_nt_error_from_unix_common(errno);
3560 tevent_req_nterror(req, status);
3561 return;
3565 } else {
3566 /* the old core protocol */
3567 server_time_zone = get_time_zone(time(NULL));
3568 server_max_xmit = 1024;
3569 server_max_mux = 1;
3572 if (server_max_xmit < 1024) {
3573 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3574 return;
3577 if (server_max_mux < 1) {
3578 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3579 return;
3583 * Now calculate the negotiated capabilities
3584 * based on the mask for:
3585 * - client only flags
3586 * - flags used in both directions
3587 * - server only flags
3589 both_capabilities = client_capabilities & server_capabilities;
3590 capabilities = client_capabilities & SMB_CAP_CLIENT_MASK;
3591 capabilities |= both_capabilities & SMB_CAP_BOTH_MASK;
3592 capabilities |= server_capabilities & SMB_CAP_SERVER_MASK;
3594 max_xmit = MIN(client_max_xmit, server_max_xmit);
3596 conn->smb1.server.capabilities = server_capabilities;
3597 conn->smb1.capabilities = capabilities;
3599 conn->smb1.server.max_xmit = server_max_xmit;
3600 conn->smb1.max_xmit = max_xmit;
3602 conn->smb1.server.max_mux = server_max_mux;
3604 conn->smb1.server.security_mode = server_security_mode;
3606 conn->smb1.server.readbraw = server_readbraw;
3607 conn->smb1.server.writebraw = server_writebraw;
3608 conn->smb1.server.lockread = server_lockread;
3609 conn->smb1.server.writeunlock = server_writeunlock;
3611 conn->smb1.server.session_key = server_session_key;
3613 talloc_steal(conn, server_gss_blob.data);
3614 conn->smb1.server.gss_blob = server_gss_blob;
3615 conn->smb1.server.guid = server_guid;
3616 memcpy(conn->smb1.server.challenge, server_challenge, 8);
3617 conn->smb1.server.workgroup = talloc_move(conn, &server_workgroup);
3618 conn->smb1.server.name = talloc_move(conn, &server_name);
3620 conn->smb1.server.time_zone = server_time_zone;
3621 conn->smb1.server.system_time = server_system_time;
3623 tevent_req_done(req);
3626 static struct tevent_req *smbXcli_negprot_smb2_subreq(struct smbXcli_negprot_state *state)
3628 size_t i;
3629 uint8_t *buf;
3630 uint16_t dialect_count = 0;
3632 buf = state->smb2.dyn;
3633 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3634 if (smb2cli_prots[i].proto < state->min_protocol) {
3635 continue;
3638 if (smb2cli_prots[i].proto > state->max_protocol) {
3639 continue;
3642 SSVAL(buf, dialect_count*2, smb2cli_prots[i].smb2_dialect);
3643 dialect_count++;
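/*
* Build the 36 byte SMB2 NEGOTIATE request body: StructureSize,
* DialectCount, SecurityMode, Reserved, Capabilities, ClientGuid
* and ClientStartTime; the dialect array is carried in the dynamic
* part of the request.
*/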
3646 buf = state->smb2.fixed;
3647 SSVAL(buf, 0, 36);
3648 SSVAL(buf, 2, dialect_count);
3649 SSVAL(buf, 4, state->conn->smb2.client.security_mode);
3650 SSVAL(buf, 6, 0); /* Reserved */
3651 SIVAL(buf, 8, 0); /* Capabilities */
3652 if (state->max_protocol >= PROTOCOL_SMB2_10) {
3653 NTSTATUS status;
3654 DATA_BLOB blob;
3656 status = GUID_to_ndr_blob(&state->conn->smb2.client.guid,
3657 state, &blob);
3658 if (!NT_STATUS_IS_OK(status)) {
3659 return NULL;
3661 memcpy(buf+12, blob.data, 16); /* ClientGuid */
3662 } else {
3663 memset(buf+12, 0, 16); /* ClientGuid */
3665 SBVAL(buf, 28, 0); /* ClientStartTime */
3667 return smb2cli_req_send(state, state->ev,
3668 state->conn, SMB2_OP_NEGPROT,
3669 0, 0, /* flags */
3670 state->timeout_msec,
3671 0xFEFF, 0, NULL, /* pid, tid, session */
3672 state->smb2.fixed, sizeof(state->smb2.fixed),
3673 state->smb2.dyn, dialect_count*2);
3676 static void smbXcli_negprot_smb2_done(struct tevent_req *subreq)
3678 struct tevent_req *req =
3679 tevent_req_callback_data(subreq,
3680 struct tevent_req);
3681 struct smbXcli_negprot_state *state =
3682 tevent_req_data(req,
3683 struct smbXcli_negprot_state);
3684 struct smbXcli_conn *conn = state->conn;
3685 size_t security_offset, security_length;
3686 DATA_BLOB blob;
3687 NTSTATUS status;
3688 struct iovec *iov;
3689 uint8_t *body;
3690 size_t i;
3691 uint16_t dialect_revision;
3692 static const struct smb2cli_req_expected_response expected[] = {
3694 .status = NT_STATUS_OK,
3695 .body_size = 0x41
3699 status = smb2cli_req_recv(subreq, state, &iov,
3700 expected, ARRAY_SIZE(expected));
3701 TALLOC_FREE(subreq);
3702 if (tevent_req_nterror(req, status)) {
3703 return;
3706 body = (uint8_t *)iov[1].iov_base;
3708 dialect_revision = SVAL(body, 4);
3710 for (i=0; i < ARRAY_SIZE(smb2cli_prots); i++) {
3711 if (smb2cli_prots[i].proto < state->min_protocol) {
3712 continue;
3715 if (smb2cli_prots[i].proto > state->max_protocol) {
3716 continue;
3719 if (smb2cli_prots[i].smb2_dialect != dialect_revision) {
3720 continue;
3723 conn->protocol = smb2cli_prots[i].proto;
3724 break;
3727 if (conn->protocol == PROTOCOL_NONE) {
3728 if (state->min_protocol >= PROTOCOL_SMB2_02) {
3729 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3730 return;
3733 if (dialect_revision != SMB2_DIALECT_REVISION_2FF) {
3734 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3735 return;
3738 /* make sure we do not loop forever */
3739 state->min_protocol = PROTOCOL_SMB2_02;
3742 * send an SMB2 negprot, in order to negotiate
3743 * the SMB2 dialect. This needs to use the
3744 * message id 1.
3746 state->conn->smb2.mid = 1;
3747 subreq = smbXcli_negprot_smb2_subreq(state);
3748 if (tevent_req_nomem(subreq, req)) {
3749 return;
3751 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3752 return;
3755 conn->smb2.server.security_mode = SVAL(body, 2);
3757 blob = data_blob_const(body + 8, 16);
3758 status = GUID_from_data_blob(&blob, &conn->smb2.server.guid);
3759 if (tevent_req_nterror(req, status)) {
3760 return;
3763 conn->smb2.server.capabilities = IVAL(body, 24);
3764 conn->smb2.server.max_trans_size= IVAL(body, 28);
3765 conn->smb2.server.max_read_size = IVAL(body, 32);
3766 conn->smb2.server.max_write_size= IVAL(body, 36);
3767 conn->smb2.server.system_time = BVAL(body, 40);
3768 conn->smb2.server.start_time = BVAL(body, 48);
3770 security_offset = SVAL(body, 56);
3771 security_length = SVAL(body, 58);
3773 if (security_offset != SMB2_HDR_BODY + iov[1].iov_len) {
3774 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3775 return;
3778 if (security_length > iov[2].iov_len) {
3779 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
3780 return;
3783 conn->smb2.server.gss_blob = data_blob_talloc(conn,
3784 iov[2].iov_base,
3785 security_length);
3786 if (tevent_req_nomem(conn->smb2.server.gss_blob.data, req)) {
3787 return;
3790 tevent_req_done(req);
3793 static NTSTATUS smbXcli_negprot_dispatch_incoming(struct smbXcli_conn *conn,
3794 TALLOC_CTX *tmp_mem,
3795 uint8_t *inbuf)
3797 size_t num_pending = talloc_array_length(conn->pending);
3798 struct tevent_req *subreq;
3799 struct smbXcli_req_state *substate;
3800 struct tevent_req *req;
3801 uint32_t protocol_magic = IVAL(inbuf, 4);
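/*
* inbuf still starts with the 4 byte NBT length header, so the
* protocol magic of the first PDU is found at offset 4.
*/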
3803 if (num_pending != 1) {
3804 return NT_STATUS_INTERNAL_ERROR;
3807 subreq = conn->pending[0];
3808 substate = tevent_req_data(subreq, struct smbXcli_req_state);
3809 req = tevent_req_callback_data(subreq, struct tevent_req);
3811 switch (protocol_magic) {
3812 case SMB_MAGIC:
3813 tevent_req_set_callback(subreq, smbXcli_negprot_smb1_done, req);
3814 conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
3815 return smb1cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3817 case SMB2_MAGIC:
3818 if (substate->smb2.recv_iov == NULL) {
3820 * For the SMB1 negprot we have to move it.
3822 substate->smb2.recv_iov = substate->smb1.recv_iov;
3823 substate->smb1.recv_iov = NULL;
3826 tevent_req_set_callback(subreq, smbXcli_negprot_smb2_done, req);
3827 conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
3828 return smb2cli_conn_dispatch_incoming(conn, tmp_mem, inbuf);
3831 DEBUG(10, ("Got non-SMB PDU\n"));
3832 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3835 NTSTATUS smbXcli_negprot_recv(struct tevent_req *req)
3837 return tevent_req_simple_recv_ntstatus(req);
3840 NTSTATUS smbXcli_negprot(struct smbXcli_conn *conn,
3841 uint32_t timeout_msec,
3842 enum protocol_types min_protocol,
3843 enum protocol_types max_protocol)
3845 TALLOC_CTX *frame = talloc_stackframe();
3846 struct tevent_context *ev;
3847 struct tevent_req *req;
3848 NTSTATUS status = NT_STATUS_NO_MEMORY;
3849 bool ok;
3851 if (smbXcli_conn_has_async_calls(conn)) {
3853 * Can't use sync call while an async call is in flight
3855 status = NT_STATUS_INVALID_PARAMETER_MIX;
3856 goto fail;
3858 ev = tevent_context_init(frame);
3859 if (ev == NULL) {
3860 goto fail;
3862 req = smbXcli_negprot_send(frame, ev, conn, timeout_msec,
3863 min_protocol, max_protocol);
3864 if (req == NULL) {
3865 goto fail;
3867 ok = tevent_req_poll(req, ev);
3868 if (!ok) {
3869 status = map_nt_error_from_unix_common(errno);
3870 goto fail;
3872 status = smbXcli_negprot_recv(req);
3873 fail:
3874 TALLOC_FREE(frame);
3875 return status;
3878 static int smbXcli_session_destructor(struct smbXcli_session *session)
3880 if (session->conn == NULL) {
3881 return 0;
3884 DLIST_REMOVE(session->conn->sessions, session);
3885 return 0;
3888 struct smbXcli_session *smbXcli_session_create(TALLOC_CTX *mem_ctx,
3889 struct smbXcli_conn *conn)
3891 struct smbXcli_session *session;
3893 session = talloc_zero(mem_ctx, struct smbXcli_session);
3894 if (session == NULL) {
3895 return NULL;
3897 talloc_set_destructor(session, smbXcli_session_destructor);
3899 DLIST_ADD_END(conn->sessions, session, struct smbXcli_session *);
3900 session->conn = conn;
3902 return session;
3905 uint8_t smb2cli_session_security_mode(struct smbXcli_session *session)
3907 struct smbXcli_conn *conn = session->conn;
3908 uint8_t security_mode = 0;
3910 if (conn == NULL) {
3911 return security_mode;
3914 security_mode = SMB2_NEGOTIATE_SIGNING_ENABLED;
3915 if (conn->mandatory_signing) {
3916 security_mode |= SMB2_NEGOTIATE_SIGNING_REQUIRED;
3919 return security_mode;
3922 uint64_t smb2cli_session_current_id(struct smbXcli_session *session)
3924 return session->smb2.session_id;
3927 void smb2cli_session_set_id_and_flags(struct smbXcli_session *session,
3928 uint64_t session_id,
3929 uint16_t session_flags)
3931 session->smb2.session_id = session_id;
3932 session->smb2.session_flags = session_flags;
3935 NTSTATUS smb2cli_session_update_session_key(struct smbXcli_session *session,
3936 const DATA_BLOB session_key,
3937 const struct iovec *recv_iov)
3939 struct smbXcli_conn *conn = session->conn;
3940 uint16_t no_sign_flags;
3941 DATA_BLOB signing_key;
3942 NTSTATUS status;
3944 if (conn == NULL) {
3945 return NT_STATUS_INVALID_PARAMETER_MIX;
3948 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
3950 if (session->smb2.session_flags & no_sign_flags) {
3951 session->smb2.should_sign = false;
3952 return NT_STATUS_OK;
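/*
* Pick the key that verifies the final session setup response: the
* established per-session signing key if we already have one,
* otherwise the freshly derived session key; for a channel bind
* (channel_setup) the new channel's session key is always used.
*/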
3955 if (session->smb2.signing_key.length > 0) {
3956 signing_key = session->smb2.signing_key;
3957 } else {
3958 signing_key = session_key;
3960 if (session->smb2.channel_setup) {
3961 signing_key = session_key;
3964 status = smb2_signing_check_pdu(signing_key, recv_iov, 3);
3965 if (!NT_STATUS_IS_OK(status)) {
3966 return status;
3969 if (!session->smb2.channel_setup) {
3970 session->smb2.session_key = data_blob_dup_talloc(session,
3971 session_key);
3972 if (session->smb2.session_key.data == NULL) {
3973 return NT_STATUS_NO_MEMORY;
3977 if (session->smb2.channel_setup) {
3978 data_blob_free(&session->smb2.signing_key);
3979 session->smb2.channel_setup = false;
3982 if (session->smb2.signing_key.length > 0) {
3983 return NT_STATUS_OK;
3986 session->smb2.signing_key = data_blob_dup_talloc(session, signing_key);
3987 if (session->smb2.signing_key.data == NULL) {
3988 return NT_STATUS_NO_MEMORY;
3991 session->smb2.should_sign = false;
3993 if (conn->desire_signing) {
3994 session->smb2.should_sign = true;
3997 if (conn->smb2.server.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
3998 session->smb2.should_sign = true;
4001 return NT_STATUS_OK;
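/*
* smb2cli_session_create_channel() prepares session binding: it
* clones the session id, flags and keys of an existing session onto
* a new smbXcli_session tied to a second connection and sets
* channel_setup, so the next session setup on that connection is
* treated as a channel bind.
*/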
4004 NTSTATUS smb2cli_session_create_channel(TALLOC_CTX *mem_ctx,
4005 struct smbXcli_session *session1,
4006 struct smbXcli_conn *conn,
4007 struct smbXcli_session **_session2)
4009 struct smbXcli_session *session2;
4010 uint16_t no_sign_flags;
4012 no_sign_flags = SMB2_SESSION_FLAG_IS_GUEST | SMB2_SESSION_FLAG_IS_NULL;
4014 if (session1->smb2.session_flags & no_sign_flags) {
4015 return NT_STATUS_INVALID_PARAMETER_MIX;
4018 if (session1->smb2.session_key.length == 0) {
4019 return NT_STATUS_INVALID_PARAMETER_MIX;
4022 if (session1->smb2.signing_key.length == 0) {
4023 return NT_STATUS_INVALID_PARAMETER_MIX;
4026 if (conn == NULL) {
4027 return NT_STATUS_INVALID_PARAMETER_MIX;
4030 session2 = talloc_zero(mem_ctx, struct smbXcli_session);
4031 if (session2 == NULL) {
4032 return NT_STATUS_NO_MEMORY;
4034 session2->smb2.session_id = session1->smb2.session_id;
4035 session2->smb2.session_flags = session1->smb2.session_flags;
4037 session2->smb2.session_key = data_blob_dup_talloc(session2,
4038 session1->smb2.session_key);
4039 if (session2->smb2.session_key.data == NULL) {
4040 return NT_STATUS_NO_MEMORY;
4043 session2->smb2.signing_key = data_blob_dup_talloc(session2,
4044 session1->smb2.signing_key);
4045 if (session2->smb2.signing_key.data == NULL) {
4046 return NT_STATUS_NO_MEMORY;
4049 session2->smb2.should_sign = session1->smb2.should_sign;
4050 session2->smb2.channel_setup = true;
4052 talloc_set_destructor(session2, smbXcli_session_destructor);
4053 DLIST_ADD_END(conn->sessions, session2, struct smbXcli_session *);
4054 session2->conn = conn;
4056 *_session2 = session2;
4057 return NT_STATUS_OK;