smbXcli: rework smbXcli_base.c to use smbXcli_conn/smbXcli_req
[Samba/gebeck_regimport.git] / libcli / smb / smbXcli_base.c
blob2755218eb5ea68f93389cf8f39e4f1ae8b013f6e
1 /*
2 Unix SMB/CIFS implementation.
3 Infrastructure for async SMB client requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "includes.h"
22 #include "system/network.h"
23 #include "../lib/async_req/async_sock.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
26 #include "lib/util/util_net.h"
27 #include "../libcli/smb/smb_common.h"
28 #include "../libcli/smb/smb_seal.h"
29 #include "../libcli/smb/smb_signing.h"
30 #include "../libcli/smb/read_smb.h"
31 #include "smbXcli_base.h"
32 #include "librpc/ndr/libndr.h"
/*
 * Per-connection client state: the socket, the outgoing write queue,
 * the array of in-flight requests and the SMB1 negotiation results.
 */
struct smbXcli_conn {
	int fd;				/* socket fd; -1 once disconnected */
	struct sockaddr_storage local_ss;
	struct sockaddr_storage remote_ss;
	const char *remote_name;

	struct tevent_queue *outgoing;	/* serializes writev_send() calls */
	struct tevent_req **pending;	/* talloc array of in-flight requests */
	struct tevent_req *read_smb_req; /* the single outstanding socket read */

	enum protocol_types protocol;
	bool allow_signing;
	bool desire_signing;
	bool mandatory_signing;

	/*
	 * The incoming dispatch function should return:
	 * - NT_STATUS_RETRY, if more incoming PDUs are expected.
	 * - NT_STATUS_OK, if no more processing is desired, e.g.
	 *                 the dispatch function called
	 *                 tevent_req_done().
	 * - All other return values disconnect the connection.
	 */
	NTSTATUS (*dispatch_incoming)(struct smbXcli_conn *conn,
				      TALLOC_CTX *tmp_mem,
				      uint8_t *inbuf);

	struct {
		/* what we advertised to the server */
		struct {
			uint32_t capabilities;
			uint32_t max_xmit;
		} client;

		/* what the server told us during negprot/sesssetup */
		struct {
			uint32_t capabilities;
			uint32_t max_xmit;
			uint16_t max_mux;
			uint16_t security_mode;
			bool readbraw;
			bool writebraw;
			bool lockread;
			bool writeunlock;
			uint32_t session_key;
			struct GUID guid;
			DATA_BLOB gss_blob;
			uint8_t challenge[8];
			const char *workgroup;
			int time_zone;
			NTTIME system_time;
		} server;

		/*
		 * effective values; initialised from the client defaults
		 * in smbXcli_conn_create()
		 */
		uint32_t capabilities;
		uint32_t max_xmit;

		uint16_t mid;		/* next multiplex-id candidate */

		struct smb_signing_state *signing;
		struct smb_trans_enc_state *trans_enc;
	} smb1;
};
/*
 * Private state attached to every tevent_req created by
 * smb1cli_req_create(); carries the marshalled request and,
 * after dispatch, the received reply buffer.
 */
struct smbXcli_req_state {
	struct tevent_context *ev;
	struct smbXcli_conn *conn;

	uint8_t length_hdr[4];		/* NBT length prefix of the request */

	bool one_way;			/* true if no reply is expected */

	uint8_t *inbuf;			/* received PDU; NULL for one-way requests */

	struct {
		/* Space for the header including the wct */
		uint8_t hdr[HDR_VWV];

		/*
		 * For normal requests, smb1cli_req_send chooses a mid.
		 * SecondaryV trans requests need to use the mid of the primary
		 * request, so we need a place to store it.
		 * Assume it is set if != 0.
		 */
		uint16_t mid;

		uint16_t *vwv;			/* caller-provided parameter words */
		uint8_t bytecount_buf[2];	/* little-endian byte count */

#define MAX_SMB_IOV 5
		/* length_hdr, hdr, words, byte_count, buffers */
		struct iovec iov[1 + 3 + MAX_SMB_IOV];
		int iov_count;

		uint32_t seqnum;	/* signing sequence number of this request */
		int chain_num;		/* index of this request within its chain */
		int chain_length;	/* 1 for unchained requests */
		struct tevent_req **chained_requests;
	} smb1;
};
/*
 * Talloc destructor: tear the connection down quietly and release
 * any encryption state.
 */
static int smbXcli_conn_destructor(struct smbXcli_conn *conn)
{
	/*
	 * NT_STATUS_OK, means we do not notify the callers
	 */
	smbXcli_conn_disconnect(conn, NT_STATUS_OK);

	if (conn->smb1.trans_enc) {
		common_free_encryption_state(&conn->smb1.trans_enc);
	}

	return 0;
}
/*
 * Create a connection object around an already-connected socket.
 *
 * @param mem_ctx		talloc parent of the new connection
 * @param fd			connected socket; NOTE(review): on failure the
 *				fd is NOT closed here — caller keeps ownership
 * @param remote_name		server name, duplicated into the connection
 * @param signing_state		maps to allow/desire/mandatory signing flags
 * @param smb1_capabilities	capabilities to advertise for SMB1
 * @return new connection or NULL on any failure
 */
struct smbXcli_conn *smbXcli_conn_create(TALLOC_CTX *mem_ctx,
					 int fd,
					 const char *remote_name,
					 enum smb_signing_setting signing_state,
					 uint32_t smb1_capabilities)
{
	struct smbXcli_conn *conn = NULL;
	void *ss = NULL;
	struct sockaddr *sa = NULL;
	socklen_t sa_length;
	int ret;

	conn = talloc_zero(mem_ctx, struct smbXcli_conn);
	if (!conn) {
		return NULL;
	}

	conn->remote_name = talloc_strdup(conn, remote_name);
	if (conn->remote_name == NULL) {
		goto error;
	}

	conn->fd = fd;

	/* record both endpoint addresses for later inspection */
	ss = (void *)&conn->local_ss;
	sa = (struct sockaddr *)ss;
	sa_length = sizeof(conn->local_ss);
	ret = getsockname(fd, sa, &sa_length);
	if (ret == -1) {
		goto error;
	}

	ss = (void *)&conn->remote_ss;
	sa = (struct sockaddr *)ss;
	sa_length = sizeof(conn->remote_ss);
	ret = getpeername(fd, sa, &sa_length);
	if (ret == -1) {
		goto error;
	}

	conn->outgoing = tevent_queue_create(conn, "smbXcli_outgoing");
	if (conn->outgoing == NULL) {
		goto error;
	}
	conn->pending = NULL;

	conn->protocol = PROTOCOL_NONE;

	switch (signing_state) {
	case SMB_SIGNING_OFF:
		/* never */
		conn->allow_signing = false;
		conn->desire_signing = false;
		conn->mandatory_signing = false;
		break;
	case SMB_SIGNING_DEFAULT:
	case SMB_SIGNING_IF_REQUIRED:
		/* if the server requires it */
		conn->allow_signing = true;
		conn->desire_signing = false;
		conn->mandatory_signing = false;
		break;
	case SMB_SIGNING_REQUIRED:
		/* always */
		conn->allow_signing = true;
		conn->desire_signing = true;
		conn->mandatory_signing = true;
		break;
	}

	conn->smb1.client.capabilities = smb1_capabilities;
	conn->smb1.client.max_xmit = UINT16_MAX;

	conn->smb1.capabilities = conn->smb1.client.capabilities;
	conn->smb1.max_xmit = 1024;

	/* mid 0 is reserved; start allocating at 1 */
	conn->smb1.mid = 1;

	/* initialise signing */
	conn->smb1.signing = smb_signing_init(conn,
					      conn->allow_signing,
					      conn->desire_signing,
					      conn->mandatory_signing);
	if (!conn->smb1.signing) {
		goto error;
	}

	talloc_set_destructor(conn, smbXcli_conn_destructor);
	return conn;

 error:
	TALLOC_FREE(conn);
	return NULL;
}
240 bool smbXcli_conn_is_connected(struct smbXcli_conn *conn)
242 if (conn == NULL) {
243 return false;
246 if (conn->fd == -1) {
247 return false;
250 return true;
/* Return the currently negotiated protocol level (PROTOCOL_NONE initially). */
enum protocol_types smbXcli_conn_protocol(struct smbXcli_conn *conn)
{
	return conn->protocol;
}
258 bool smbXcli_conn_use_unicode(struct smbXcli_conn *conn)
260 if (conn->protocol >= PROTOCOL_SMB2_02) {
261 return true;
264 if (conn->smb1.capabilities & CAP_UNICODE) {
265 return true;
268 return false;
/* Apply a user supplied socket-options string to the connection's fd. */
void smbXcli_conn_set_sockopt(struct smbXcli_conn *conn, const char *options)
{
	set_socket_options(conn->fd, options);
}
/* Local socket address, captured via getsockname() at creation time. */
const struct sockaddr_storage *smbXcli_conn_local_sockaddr(struct smbXcli_conn *conn)
{
	return &conn->local_ss;
}
/* Remote socket address, captured via getpeername() at creation time. */
const struct sockaddr_storage *smbXcli_conn_remote_sockaddr(struct smbXcli_conn *conn)
{
	return &conn->remote_ss;
}
/* Server name as passed to smbXcli_conn_create(); owned by the connection. */
const char *smbXcli_conn_remote_name(struct smbXcli_conn *conn)
{
	return conn->remote_name;
}
/* Turn on SMB1 signing using the session key/response; thin wrapper. */
bool smb1cli_conn_activate_signing(struct smbXcli_conn *conn,
				   const DATA_BLOB user_session_key,
				   const DATA_BLOB response)
{
	return smb_signing_activate(conn->smb1.signing,
				    user_session_key,
				    response);
}
/* Verify the signature of an incoming PDU against the expected seqnum. */
bool smb1cli_conn_check_signing(struct smbXcli_conn *conn,
				const uint8_t *buf, uint32_t seqnum)
{
	return smb_signing_check_pdu(conn->smb1.signing, buf, seqnum);
}
/* True once signing has been activated on this connection. */
bool smb1cli_conn_signing_is_active(struct smbXcli_conn *conn)
{
	return smb_signing_is_active(conn->smb1.signing);
}
/*
 * Install a new transport encryption state, taking ownership of "es".
 * Any previously installed state is freed first.
 */
void smb1cli_conn_set_encryption(struct smbXcli_conn *conn,
				 struct smb_trans_enc_state *es)
{
	/* Replace the old state, if any. */
	if (conn->smb1.trans_enc) {
		common_free_encryption_state(&conn->smb1.trans_enc);
	}
	conn->smb1.trans_enc = es;
}
/* True if transport encryption is currently active on this connection. */
bool smb1cli_conn_encryption_on(struct smbXcli_conn *conn)
{
	return common_encryption_on(conn->smb1.trans_enc);
}
327 static NTSTATUS smb1cli_pull_raw_error(const uint8_t *buf)
329 const uint8_t *hdr = buf + NBT_HDR_SIZE;
330 uint32_t flags2 = SVAL(hdr, HDR_FLG2);
331 NTSTATUS status = NT_STATUS(IVAL(hdr, HDR_RCLS));
333 if (NT_STATUS_IS_OK(status)) {
334 return NT_STATUS_OK;
337 if (flags2 & FLAGS2_32_BIT_ERROR_CODES) {
338 return status;
341 return NT_STATUS_DOS(CVAL(hdr, HDR_RCLS), SVAL(hdr, HDR_ERR));
/**
 * Figure out if there is an andx command behind the current one
 * @param[in] buf The smb buffer to look at
 * @param[in] ofs The offset to the wct field that is followed by the cmd
 * @param[in] cmd The command at that offset
 * @retval Is there a command following?
 */
static bool smb1cli_have_andx_command(const uint8_t *buf,
				      uint16_t ofs,
				      uint8_t cmd)
{
	size_t buflen = talloc_get_size(buf);
	uint8_t wct;

	if (!smb1cli_is_andx_req(cmd)) {
		return false;
	}

	/* no room left for even the wct byte? */
	if ((ofs == buflen) || (ofs == buflen - 1)) {
		return false;
	}

	wct = CVAL(buf, ofs);
	if (wct < 2) {
		/*
		 * Not enough space for the command and a following pointer
		 */
		return false;
	}

	/* an andx command byte of 0xff means "no further command" */
	return CVAL(buf, ofs + 1) != 0xff;
}
377 * Is the SMB command able to hold an AND_X successor
378 * @param[in] cmd The SMB command in question
379 * @retval Can we add a chained request after "cmd"?
381 bool smb1cli_is_andx_req(uint8_t cmd)
383 switch (cmd) {
384 case SMBtconX:
385 case SMBlockingX:
386 case SMBopenX:
387 case SMBreadX:
388 case SMBwriteX:
389 case SMBsesssetupX:
390 case SMBulogoffX:
391 case SMBntcreateX:
392 return true;
393 break;
394 default:
395 break;
398 return false;
/*
 * Allocate a fresh multiplex id that does not collide with any
 * pending request. 0 and 0xffff are never handed out — 0 marks
 * "unset" in smbXcli_req_state and 0xffff is treated as an oplock
 * break indicator by the dispatch code.
 */
static uint16_t smb1cli_alloc_mid(struct smbXcli_conn *conn)
{
	size_t num_pending = talloc_array_length(conn->pending);
	uint16_t result;

	while (true) {
		size_t i;

		/* uint16_t arithmetic wraps naturally at 0x10000 */
		result = conn->smb1.mid++;
		if ((result == 0) || (result == 0xffff)) {
			continue;
		}

		for (i=0; i<num_pending; i++) {
			if (result == smb1cli_req_mid(conn->pending[i])) {
				break;
			}
		}

		if (i == num_pending) {
			return result;
		}
	}
}
/*
 * Remove a request from conn->pending. No-op for requests that still
 * expect more replies (smb1.mid != 0, i.e. [nt]trans[2] secondaries).
 * Also drops the socket read request when nothing is pending anymore.
 */
void smbXcli_req_unset_pending(struct tevent_req *req)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);
	struct smbXcli_conn *conn = state->conn;
	size_t num_pending = talloc_array_length(conn->pending);
	size_t i;

	if (state->smb1.mid != 0) {
		/*
		 * This is a [nt]trans[2] request which waits
		 * for more than one reply.
		 */
		return;
	}

	/* drop the destructor, it would call back into this function */
	talloc_set_destructor(req, NULL);

	if (num_pending == 1) {
		/*
		 * The pending read_smb tevent_req is a child of
		 * conn->pending. So if nothing is pending anymore, we need to
		 * delete the socket read fde.
		 */
		TALLOC_FREE(conn->pending);
		conn->read_smb_req = NULL;
		return;
	}

	for (i=0; i<num_pending; i++) {
		if (req == conn->pending[i]) {
			break;
		}
	}
	if (i == num_pending) {
		/*
		 * Something's seriously broken. Just returning here is the
		 * right thing nevertheless, the point of this routine is to
		 * remove ourselves from conn->pending.
		 */
		return;
	}

	/*
	 * Remove ourselves from the conn->pending array
	 */
	for (; i < (num_pending - 1); i++) {
		conn->pending[i] = conn->pending[i+1];
	}

	/*
	 * No NULL check here, we're shrinking by sizeof(void *), and
	 * talloc_realloc just adjusts the size for this.
	 */
	conn->pending = talloc_realloc(NULL, conn->pending, struct tevent_req *,
				       num_pending - 1);
	return;
}
/*
 * Talloc destructor installed by smbXcli_req_set_pending(): guarantees
 * a dying request leaves conn->pending.
 */
static int smbXcli_req_destructor(struct tevent_req *req)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);

	/*
	 * Make sure we really remove it from
	 * the pending array on destruction.
	 * (mid != 0 would make unset_pending a no-op, see there)
	 */
	state->smb1.mid = 0;
	smbXcli_req_unset_pending(req);
	return 0;
}
static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn);

/*
 * Add a request to conn->pending and make sure a socket read is
 * outstanding so its reply can be received.
 *
 * @return false on disconnect or allocation failure; in the latter
 *         case the whole connection is torn down and the caller must
 *         notify the current request itself.
 */
bool smbXcli_req_set_pending(struct tevent_req *req)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);
	struct smbXcli_conn *conn;
	struct tevent_req **pending;
	size_t num_pending;

	conn = state->conn;

	if (!smbXcli_conn_is_connected(conn)) {
		return false;
	}

	num_pending = talloc_array_length(conn->pending);

	pending = talloc_realloc(conn, conn->pending, struct tevent_req *,
				 num_pending+1);
	if (pending == NULL) {
		return false;
	}
	pending[num_pending] = req;
	conn->pending = pending;
	talloc_set_destructor(req, smbXcli_req_destructor);

	if (!smbXcli_conn_receive_next(conn)) {
		/*
		 * the caller should notify the current request
		 *
		 * And all other pending requests get notified
		 * by smbXcli_conn_disconnect().
		 */
		smbXcli_req_unset_pending(req);
		smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
		return false;
	}

	return true;
}
static void smbXcli_conn_received(struct tevent_req *subreq);

/*
 * Ensure a read_smb request is outstanding whenever at least one
 * request is pending. Idempotent: returns true immediately if a read
 * is already in flight or nothing is pending.
 */
static bool smbXcli_conn_receive_next(struct smbXcli_conn *conn)
{
	size_t num_pending = talloc_array_length(conn->pending);
	struct tevent_req *req;
	struct smbXcli_req_state *state;

	if (conn->read_smb_req != NULL) {
		return true;
	}

	if (num_pending == 0) {
		return true;
	}

	req = conn->pending[0];
	state = tevent_req_data(req, struct smbXcli_req_state);

	/*
	 * We're the first ones, add the read_smb request that waits for the
	 * answer from the server
	 */
	conn->read_smb_req = read_smb_send(conn->pending, state->ev, conn->fd);
	if (conn->read_smb_req == NULL) {
		return false;
	}
	tevent_req_set_callback(conn->read_smb_req, smbXcli_conn_received, conn);
	return true;
}
/*
 * Close the socket and fail every pending request with "status".
 * Passing NT_STATUS_OK cancels the requests silently (no callbacks),
 * which is what the talloc destructor uses.
 */
void smbXcli_conn_disconnect(struct smbXcli_conn *conn, NTSTATUS status)
{
	if (conn->fd != -1) {
		close(conn->fd);
	}
	conn->fd = -1;

	/*
	 * Cancel all pending requests. We do not do a for-loop walking
	 * conn->pending because that array changes in
	 * smbXcli_req_unset_pending.
	 */
	while (talloc_array_length(conn->pending) > 0) {
		struct tevent_req *req;
		struct smbXcli_req_state *state;

		req = conn->pending[0];
		state = tevent_req_data(req, struct smbXcli_req_state);

		/*
		 * We're dead. No point waiting for trans2
		 * replies.
		 */
		state->smb1.mid = 0;

		smbXcli_req_unset_pending(req);

		if (NT_STATUS_IS_OK(status)) {
			/* do not notify the callers */
			continue;
		}

		/*
		 * we need to defer the callback, because we may notify more
		 * then one caller.
		 */
		tevent_req_defer_callback(req, state->ev);
		tevent_req_nterror(req, status);
	}
}
617 * Fetch a smb request's mid. Only valid after the request has been sent by
618 * smb1cli_req_send().
620 uint16_t smb1cli_req_mid(struct tevent_req *req)
622 struct smbXcli_req_state *state =
623 tevent_req_data(req,
624 struct smbXcli_req_state);
626 if (state->smb1.mid != 0) {
627 return state->smb1.mid;
630 return SVAL(state->smb1.hdr, HDR_MID);
/*
 * Force a fixed mid on a request (used by trans secondaries which must
 * reuse the primary's mid); 0 restores automatic allocation.
 */
void smb1cli_req_set_mid(struct tevent_req *req, uint16_t mid)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);

	state->smb1.mid = mid;
}
/* Signing sequence number assigned when the request was signed/sent. */
uint32_t smb1cli_req_seqnum(struct tevent_req *req)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);

	return state->smb1.seqnum;
}
/* Override the signing sequence number stored for this request. */
void smb1cli_req_set_seqnum(struct tevent_req *req, uint32_t seqnum)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);

	state->smb1.seqnum = seqnum;
}
/* Total number of bytes covered by "count" iovec entries. */
static size_t smbXcli_iov_len(const struct iovec *iov, int count)
{
	const struct iovec *end = iov + count;
	size_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}
	return total;
}
670 static uint8_t *smbXcli_iov_concat(TALLOC_CTX *mem_ctx,
671 const struct iovec *iov,
672 int count)
674 size_t len = smbXcli_iov_len(iov, count);
675 size_t copied;
676 uint8_t *buf;
677 int i;
679 buf = talloc_array(mem_ctx, uint8_t, len);
680 if (buf == NULL) {
681 return NULL;
683 copied = 0;
684 for (i=0; i<count; i++) {
685 memcpy(buf+copied, iov[i].iov_base, iov[i].iov_len);
686 copied += iov[i].iov_len;
688 return buf;
691 struct tevent_req *smb1cli_req_create(TALLOC_CTX *mem_ctx,
692 struct tevent_context *ev,
693 struct smbXcli_conn *conn,
694 uint8_t smb_command,
695 uint8_t additional_flags,
696 uint8_t clear_flags,
697 uint16_t additional_flags2,
698 uint16_t clear_flags2,
699 uint32_t timeout_msec,
700 uint32_t pid,
701 uint16_t tid,
702 uint16_t uid,
703 uint8_t wct, uint16_t *vwv,
704 int iov_count,
705 struct iovec *bytes_iov)
707 struct tevent_req *req;
708 struct smbXcli_req_state *state;
709 uint8_t flags = 0;
710 uint16_t flags2 = 0;
712 if (iov_count > MAX_SMB_IOV) {
714 * Should not happen :-)
716 return NULL;
719 req = tevent_req_create(mem_ctx, &state,
720 struct smbXcli_req_state);
721 if (req == NULL) {
722 return NULL;
724 state->ev = ev;
725 state->conn = conn;
727 if (conn->protocol >= PROTOCOL_LANMAN1) {
728 flags |= FLAG_CASELESS_PATHNAMES;
729 flags |= FLAG_CANONICAL_PATHNAMES;
732 if (conn->protocol >= PROTOCOL_LANMAN2) {
733 flags2 |= FLAGS2_LONG_PATH_COMPONENTS;
734 flags2 |= FLAGS2_EXTENDED_ATTRIBUTES;
737 if (conn->protocol >= PROTOCOL_NT1) {
738 if (conn->smb1.capabilities & CAP_UNICODE) {
739 flags2 |= FLAGS2_UNICODE_STRINGS;
741 if (conn->smb1.capabilities & CAP_STATUS32) {
742 flags2 |= FLAGS2_32_BIT_ERROR_CODES;
744 if (conn->smb1.capabilities & CAP_EXTENDED_SECURITY) {
745 flags2 |= FLAGS2_EXTENDED_SECURITY;
749 flags |= additional_flags;
750 flags &= ~clear_flags;
751 flags2 |= additional_flags2;
752 flags2 &= ~clear_flags2;
754 SIVAL(state->smb1.hdr, 0, SMB_MAGIC);
755 SCVAL(state->smb1.hdr, HDR_COM, smb_command);
756 SIVAL(state->smb1.hdr, HDR_RCLS, NT_STATUS_V(NT_STATUS_OK));
757 SCVAL(state->smb1.hdr, HDR_FLG, flags);
758 SSVAL(state->smb1.hdr, HDR_FLG2, flags2);
759 SSVAL(state->smb1.hdr, HDR_PIDHIGH, pid >> 16);
760 SSVAL(state->smb1.hdr, HDR_TID, tid);
761 SSVAL(state->smb1.hdr, HDR_PID, pid);
762 SSVAL(state->smb1.hdr, HDR_UID, uid);
763 SSVAL(state->smb1.hdr, HDR_MID, 0); /* this comes later */
764 SSVAL(state->smb1.hdr, HDR_WCT, wct);
766 state->smb1.vwv = vwv;
768 SSVAL(state->smb1.bytecount_buf, 0, smbXcli_iov_len(bytes_iov, iov_count));
770 state->smb1.iov[0].iov_base = (void *)state->length_hdr;
771 state->smb1.iov[0].iov_len = sizeof(state->length_hdr);
772 state->smb1.iov[1].iov_base = (void *)state->smb1.hdr;
773 state->smb1.iov[1].iov_len = sizeof(state->smb1.hdr);
774 state->smb1.iov[2].iov_base = (void *)state->smb1.vwv;
775 state->smb1.iov[2].iov_len = wct * sizeof(uint16_t);
776 state->smb1.iov[3].iov_base = (void *)state->smb1.bytecount_buf;
777 state->smb1.iov[3].iov_len = sizeof(uint16_t);
779 if (iov_count != 0) {
780 memcpy(&state->smb1.iov[4], bytes_iov,
781 iov_count * sizeof(*bytes_iov));
783 state->smb1.iov_count = iov_count + 4;
785 if (timeout_msec > 0) {
786 struct timeval endtime;
788 endtime = timeval_current_ofs_msec(timeout_msec);
789 if (!tevent_req_set_endtime(req, ev, endtime)) {
790 return req;
794 switch (smb_command) {
795 case SMBtranss:
796 case SMBtranss2:
797 case SMBnttranss:
798 case SMBntcancel:
799 state->one_way = true;
800 break;
801 case SMBlockingX:
802 if ((wct == 8) &&
803 (CVAL(vwv+3, 0) == LOCKING_ANDX_OPLOCK_RELEASE)) {
804 state->one_way = true;
806 break;
809 return req;
/*
 * Sign a marshalled request. Validates the canonical 4-part iovec
 * layout, flattens it, lets the signing engine stamp the header copy
 * and copies the signed header back into iov[1].
 *
 * @param seqnum	receives the sequence number used for this PDU
 */
static NTSTATUS smb1cli_conn_signv(struct smbXcli_conn *conn,
				   struct iovec *iov, int iov_count,
				   uint32_t *seqnum)
{
	uint8_t *buf;

	/*
	 * Obvious optimization: Make cli_calculate_sign_mac work with struct
	 * iovec directly. MD5Update would do that just fine.
	 */

	if (iov_count < 4) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[0].iov_len != NBT_HDR_SIZE) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[3].iov_len != sizeof(uint16_t)) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}

	buf = smbXcli_iov_concat(talloc_tos(), iov, iov_count);
	if (buf == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	*seqnum = smb_signing_next_seqnum(conn->smb1.signing, false);
	smb_signing_sign_pdu(conn->smb1.signing, buf, *seqnum);
	/* copy the now-signed header back; +4 skips the NBT length */
	memcpy(iov[1].iov_base, buf+4, iov[1].iov_len);

	TALLOC_FREE(buf);
	return NT_STATUS_OK;
}
static void smb1cli_req_writev_done(struct tevent_req *subreq);
static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
					       TALLOC_CTX *tmp_mem,
					       uint8_t *inbuf);

/*
 * Finalize a marshalled request (mid, NBT length, signing, optional
 * encryption) and queue it on the connection's outgoing write queue.
 */
static NTSTATUS smb1cli_req_writev_submit(struct tevent_req *req,
					  struct smbXcli_req_state *state,
					  struct iovec *iov, int iov_count)
{
	struct tevent_req *subreq;
	NTSTATUS status;
	uint16_t mid;

	if (!smbXcli_conn_is_connected(state->conn)) {
		return NT_STATUS_CONNECTION_DISCONNECTED;
	}

	/* this path is SMB1-only */
	if (state->conn->protocol > PROTOCOL_NT1) {
		return NT_STATUS_REVISION_MISMATCH;
	}

	/* same canonical iovec layout that smb1cli_conn_signv expects */
	if (iov_count < 4) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[0].iov_len != NBT_HDR_SIZE) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[1].iov_len != (MIN_SMB_SIZE-sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[2].iov_len > (0xFF * sizeof(uint16_t))) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}
	if (iov[3].iov_len != sizeof(uint16_t)) {
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}

	if (state->smb1.mid != 0) {
		/* trans secondary: reuse the primary's mid */
		mid = state->smb1.mid;
	} else {
		mid = smb1cli_alloc_mid(state->conn);
	}
	SSVAL(iov[1].iov_base, HDR_MID, mid);

	/* NBT length covers everything after the 4-byte length prefix */
	_smb_setlen_nbt(iov[0].iov_base, smbXcli_iov_len(&iov[1], iov_count-1));

	status = smb1cli_conn_signv(state->conn, iov, iov_count,
				    &state->smb1.seqnum);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/*
	 * If we supported multiple encrytion contexts
	 * here we'd look up based on tid.
	 */
	if (common_encryption_on(state->conn->smb1.trans_enc)) {
		char *buf, *enc_buf;

		buf = (char *)smbXcli_iov_concat(talloc_tos(), iov, iov_count);
		if (buf == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		status = common_encrypt_buffer(state->conn->smb1.trans_enc,
					       (char *)buf, &enc_buf);
		TALLOC_FREE(buf);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("Error in encrypting client message: %s\n",
				  nt_errstr(status)));
			return status;
		}
		/* move the malloc'ed cipher text into talloc ownership */
		buf = (char *)talloc_memdup(state, enc_buf,
					    smb_len_nbt(enc_buf)+4);
		SAFE_FREE(enc_buf);
		if (buf == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		iov[0].iov_base = (void *)buf;
		iov[0].iov_len = talloc_get_size(buf);
		iov_count = 1;
	}

	/* first send on this connection installs the SMB1 dispatcher */
	if (state->conn->dispatch_incoming == NULL) {
		state->conn->dispatch_incoming = smb1cli_conn_dispatch_incoming;
	}

	subreq = writev_send(state, state->ev, state->conn->outgoing,
			     state->conn->fd, false, iov, iov_count);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, smb1cli_req_writev_done, req);
	return NT_STATUS_OK;
}
/*
 * Convenience wrapper: create a single (unchained) SMB1 request with
 * one bytes buffer and submit it immediately.
 *
 * @return NULL only on fatal creation failure; all later errors are
 *         delivered through the returned (possibly posted) request
 */
struct tevent_req *smb1cli_req_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct smbXcli_conn *conn,
				    uint8_t smb_command,
				    uint8_t additional_flags,
				    uint8_t clear_flags,
				    uint16_t additional_flags2,
				    uint16_t clear_flags2,
				    uint32_t timeout_msec,
				    uint32_t pid,
				    uint16_t tid,
				    uint16_t uid,
				    uint8_t wct, uint16_t *vwv,
				    uint32_t num_bytes,
				    const uint8_t *bytes)
{
	struct tevent_req *req;
	struct iovec iov;
	NTSTATUS status;

	iov.iov_base = discard_const_p(void, bytes);
	iov.iov_len = num_bytes;

	req = smb1cli_req_create(mem_ctx, ev, conn, smb_command,
				 additional_flags, clear_flags,
				 additional_flags2, clear_flags2,
				 timeout_msec,
				 pid, tid, uid,
				 wct, vwv, 1, &iov);
	if (req == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(req)) {
		/* creation already failed (e.g. endtime) - deliver error */
		return tevent_req_post(req, ev);
	}
	status = smb1cli_req_chain_submit(&req, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}
/*
 * writev completion: on success either finish one-way requests right
 * away or register the request as pending so the reply can be matched.
 * A write error kills the whole connection.
 */
static void smb1cli_req_writev_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq,
		struct tevent_req);
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);
	ssize_t nwritten;
	int err;

	nwritten = writev_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (nwritten == -1) {
		/* disconnect notifies this req (and all others) */
		NTSTATUS status = map_nt_error_from_unix_common(err);
		smbXcli_conn_disconnect(state->conn, status);
		return;
	}

	if (state->one_way) {
		/* no reply will ever arrive for this command */
		state->inbuf = NULL;
		tevent_req_done(req);
		return;
	}

	if (!smbXcli_req_set_pending(req)) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}
}
/*
 * read_smb completion: hand the received PDU to the connection's
 * dispatch function and re-arm the socket read if it asks for more
 * (NT_STATUS_RETRY). Any error tears the connection down.
 */
static void smbXcli_conn_received(struct tevent_req *subreq)
{
	struct smbXcli_conn *conn =
		tevent_req_callback_data(subreq,
		struct smbXcli_conn);
	TALLOC_CTX *frame = talloc_stackframe();
	NTSTATUS status;
	uint8_t *inbuf;
	ssize_t received;
	int err;

	if (subreq != conn->read_smb_req) {
		DEBUG(1, ("Internal error: cli_smb_received called with "
			  "unexpected subreq\n"));
		status = NT_STATUS_INTERNAL_ERROR;
		smbXcli_conn_disconnect(conn, status);
		TALLOC_FREE(frame);
		return;
	}
	conn->read_smb_req = NULL;

	received = read_smb_recv(subreq, frame, &inbuf, &err);
	TALLOC_FREE(subreq);
	if (received == -1) {
		status = map_nt_error_from_unix_common(err);
		smbXcli_conn_disconnect(conn, status);
		TALLOC_FREE(frame);
		return;
	}

	status = conn->dispatch_incoming(conn, frame, inbuf);
	TALLOC_FREE(frame);
	if (NT_STATUS_IS_OK(status)) {
		/*
		 * We should not do any more processing
		 * as the dispatch function called
		 * tevent_req_done().
		 */
		return;
	} else if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got an error, so notify all pending requests
		 */
		smbXcli_conn_disconnect(conn, status);
		return;
	}

	/*
	 * We got NT_STATUS_RETRY, so we may ask for a
	 * next incoming pdu.
	 */
	if (!smbXcli_conn_receive_next(conn)) {
		smbXcli_conn_disconnect(conn, NT_STATUS_NO_MEMORY);
	}
}
/*
 * Match an incoming SMB1 PDU to a pending request by mid, verify its
 * signature (after optional decryption) and complete either the single
 * request or every member of an AndX chain.
 *
 * Return value contract (see conn->dispatch_incoming):
 * NT_STATUS_OK = stop reading, NT_STATUS_RETRY = keep reading,
 * anything else = disconnect.
 */
static NTSTATUS smb1cli_conn_dispatch_incoming(struct smbXcli_conn *conn,
					       TALLOC_CTX *tmp_mem,
					       uint8_t *inbuf)
{
	struct tevent_req *req;
	struct smbXcli_req_state *state;
	NTSTATUS status;
	size_t num_pending;
	size_t i;
	uint16_t mid;
	bool oplock_break;
	const uint8_t *inhdr = inbuf + NBT_HDR_SIZE;

	if ((IVAL(inhdr, 0) != SMB_MAGIC) /* 0xFF"SMB" */
	    && (SVAL(inhdr, 0) != 0x45ff)) /* 0xFF"E" */ {
		DEBUG(10, ("Got non-SMB PDU\n"));
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	/*
	 * If we supported multiple encrytion contexts
	 * here we'd look up based on tid.
	 */
	if (common_encryption_on(conn->smb1.trans_enc)
	    && (CVAL(inbuf, 0) == 0)) {
		uint16_t enc_ctx_num;

		status = get_enc_ctx_num(inbuf, &enc_ctx_num);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("get_enc_ctx_num returned %s\n",
				   nt_errstr(status)));
			return status;
		}

		if (enc_ctx_num != conn->smb1.trans_enc->enc_ctx_num) {
			DEBUG(10, ("wrong enc_ctx %d, expected %d\n",
				   enc_ctx_num,
				   conn->smb1.trans_enc->enc_ctx_num));
			return NT_STATUS_INVALID_HANDLE;
		}

		/* decrypts in place */
		status = common_decrypt_buffer(conn->smb1.trans_enc,
					       (char *)inbuf);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("common_decrypt_buffer returned %s\n",
				   nt_errstr(status)));
			return status;
		}
	}

	mid = SVAL(inhdr, HDR_MID);
	num_pending = talloc_array_length(conn->pending);

	/* find the pending request this reply belongs to */
	for (i=0; i<num_pending; i++) {
		if (mid == smb1cli_req_mid(conn->pending[i])) {
			break;
		}
	}
	if (i == num_pending) {
		/* Dump unexpected reply */
		return NT_STATUS_RETRY;
	}

	oplock_break = false;

	if (mid == 0xffff) {
		/*
		 * Paranoia checks that this is really an oplock break request.
		 */
		oplock_break = (smb_len_nbt(inbuf) == 51); /* hdr + 8 words */
		oplock_break &= ((CVAL(inhdr, HDR_FLG) & FLAG_REPLY) == 0);
		oplock_break &= (CVAL(inhdr, HDR_COM) == SMBlockingX);
		oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(6)) == 0);
		oplock_break &= (SVAL(inhdr, HDR_VWV+VWV(7)) == 0);

		if (!oplock_break) {
			/* Dump unexpected reply */
			return NT_STATUS_RETRY;
		}
	}

	req = conn->pending[i];
	state = tevent_req_data(req, struct smbXcli_req_state);

	if (!oplock_break /* oplock breaks are not signed */
	    && !smb_signing_check_pdu(conn->smb1.signing,
				      inbuf, state->smb1.seqnum+1)) {
		DEBUG(10, ("cli_check_sign_mac failed\n"));
		return NT_STATUS_ACCESS_DENIED;
	}

	if (state->smb1.chained_requests != NULL) {
		struct tevent_req **chain = talloc_move(tmp_mem,
					    &state->smb1.chained_requests);
		size_t num_chained = talloc_array_length(chain);

		/*
		 * We steal the inbuf to the chain,
		 * so that it will stay until all
		 * requests of the chain are finished.
		 *
		 * Each requests in the chain will
		 * hold a talloc reference to the chain.
		 * This way we do not expose the talloc_reference()
		 * behavior to the callers.
		 */
		talloc_steal(chain, inbuf);

		for (i=0; i<num_chained; i++) {
			struct tevent_req **ref;

			req = chain[i];
			state = tevent_req_data(req, struct smbXcli_req_state);

			smbXcli_req_unset_pending(req);

			/*
			 * as we finish multiple requests here
			 * we need to defer the callbacks as
			 * they could destroy our current stack state.
			 */
			tevent_req_defer_callback(req, state->ev);

			ref = talloc_reference(state, chain);
			if (tevent_req_nomem(ref, req)) {
				continue;
			}

			state->inbuf = inbuf;
			state->smb1.chain_num = i;
			state->smb1.chain_length = num_chained;

			tevent_req_done(req);
		}
		return NT_STATUS_RETRY;
	}

	smbXcli_req_unset_pending(req);

	state->inbuf = talloc_move(state, &inbuf);
	state->smb1.chain_num = 0;
	state->smb1.chain_length = 1;

	if (talloc_array_length(conn->pending) == 0) {
		/* no more replies expected: safe to finish directly */
		tevent_req_done(req);
		return NT_STATUS_OK;
	}

	tevent_req_defer_callback(req, state->ev);
	tevent_req_done(req);
	return NT_STATUS_RETRY;
}
/*
 * Receive the result of an SMB1 request. Walks any AndX chain up to
 * this request's position, validates offsets against the buffer size
 * and hands out pointers into the reply (vwv/bytes point INTO inbuf;
 * pass mem_ctx+pinbuf to take ownership of the buffer).
 *
 * @param min_wct	minimum acceptable word count of the reply
 * @return NT status of the request/reply; out-pointers only valid on
 *         success (or MORE_PROCESSING_REQUIRED for session setup)
 */
NTSTATUS smb1cli_req_recv(struct tevent_req *req,
			  TALLOC_CTX *mem_ctx, uint8_t **pinbuf,
			  uint8_t min_wct, uint8_t *pwct, uint16_t **pvwv,
			  uint32_t *pnum_bytes, uint8_t **pbytes)
{
	struct smbXcli_req_state *state =
		tevent_req_data(req,
		struct smbXcli_req_state);
	NTSTATUS status = NT_STATUS_OK;
	uint8_t cmd, wct;
	uint16_t num_bytes;
	size_t wct_ofs, bytes_offset;
	int i;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	if (state->inbuf == NULL) {
		if (min_wct != 0) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;
		}
		if (pinbuf) {
			*pinbuf = NULL;
		}
		if (pwct) {
			*pwct = 0;
		}
		if (pvwv) {
			*pvwv = NULL;
		}
		if (pnum_bytes) {
			*pnum_bytes = 0;
		}
		if (pbytes) {
			*pbytes = NULL;
		}
		/* This was a request without a reply */
		return NT_STATUS_OK;
	}

	wct_ofs = NBT_HDR_SIZE + HDR_WCT;
	cmd = CVAL(state->inbuf, NBT_HDR_SIZE + HDR_COM);

	/* follow the andx offsets up to our own position in the chain */
	for (i=0; i<state->smb1.chain_num; i++) {
		if (i < state->smb1.chain_num-1) {
			if (cmd == 0xff) {
				return NT_STATUS_REQUEST_ABORTED;
			}
			if (!smb1cli_is_andx_req(cmd)) {
				return NT_STATUS_INVALID_NETWORK_RESPONSE;
			}
		}

		if (!smb1cli_have_andx_command(state->inbuf, wct_ofs, cmd)) {
			/*
			 * This request was not completed because a previous
			 * request in the chain had received an error.
			 */
			return NT_STATUS_REQUEST_ABORTED;
		}

		cmd = CVAL(state->inbuf, wct_ofs + 1);
		wct_ofs = SVAL(state->inbuf, wct_ofs + 3);

		/*
		 * Skip the all-present length field. No overflow, we've just
		 * put a 16-bit value into a size_t.
		 */
		wct_ofs += 4;

		if (wct_ofs+2 > talloc_get_size(state->inbuf)) {
			return NT_STATUS_INVALID_NETWORK_RESPONSE;
		}
	}

	status = smb1cli_pull_raw_error(state->inbuf);

	if (!smb1cli_have_andx_command(state->inbuf, wct_ofs, cmd)) {

		if ((cmd == SMBsesssetupX)
		    && NT_STATUS_EQUAL(
			    status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
			/*
			 * NT_STATUS_MORE_PROCESSING_REQUIRED is a
			 * valid return code for session setup
			 */
			goto no_err;
		}

		if (NT_STATUS_IS_ERR(status)) {
			/*
			 * The last command takes the error code. All
			 * further commands down the requested chain
			 * will get a NT_STATUS_REQUEST_ABORTED.
			 */
			return status;
		}
	} else {
		/*
		 * Only the last request in the chain get the returned
		 * status.
		 */
		status = NT_STATUS_OK;
	}

no_err:

	wct = CVAL(state->inbuf, wct_ofs);
	bytes_offset = wct_ofs + 1 + wct * sizeof(uint16_t);
	num_bytes = SVAL(state->inbuf, bytes_offset);

	if (wct < min_wct) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	/*
	 * wct_ofs is a 16-bit value plus 4, wct is a 8-bit value, num_bytes
	 * is a 16-bit value. So bytes_offset being size_t should be far from
	 * wrapping.
	 */
	if ((bytes_offset + 2 > talloc_get_size(state->inbuf))
	    || (bytes_offset > 0xffff)) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	if (pwct != NULL) {
		*pwct = wct;
	}
	if (pvwv != NULL) {
		*pvwv = (uint16_t *)(state->inbuf + wct_ofs + 1);
	}
	if (pnum_bytes != NULL) {
		*pnum_bytes = num_bytes;
	}
	if (pbytes != NULL) {
		*pbytes = (uint8_t *)state->inbuf + bytes_offset + 2;
	}
	if ((mem_ctx != NULL) && (pinbuf != NULL)) {
		/* only the last chain member may move the buffer away */
		if (state->smb1.chain_num == state->smb1.chain_length-1) {
			*pinbuf = talloc_move(mem_ctx, &state->inbuf);
		} else {
			*pinbuf = state->inbuf;
		}
	}

	return status;
}
1379 size_t smb1cli_req_wct_ofs(struct tevent_req **reqs, int num_reqs)
1381 size_t wct_ofs;
1382 int i;
1384 wct_ofs = HDR_WCT;
1386 for (i=0; i<num_reqs; i++) {
1387 struct smbXcli_req_state *state;
1388 state = tevent_req_data(reqs[i], struct smbXcli_req_state);
1389 wct_ofs += smbXcli_iov_len(state->smb1.iov+2,
1390 state->smb1.iov_count-2);
1391 wct_ofs = (wct_ofs + 3) & ~3;
1393 return wct_ofs;
/*
 * Submit a chain of SMB1 AndX requests as a single PDU.
 *
 * All requests in reqs[] are merged into one iovec array: one NBT+SMB
 * header (taken from the first request), then for each request its
 * words and bytes, separated by 0-3 padding bytes so that every wct
 * field starts 4-byte aligned.  The AndX command/offset fields (vwv[0]
 * and vwv[1]) of each non-final request are patched to point at the
 * next request; the final request's AndX slot (if it is an AndX
 * command) is terminated with 0xff/0.
 *
 * Returns NT_STATUS_OK on successful submission; on any failure the
 * partially built iov and the chained_requests array are freed again.
 */
NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
{
	struct smbXcli_req_state *first_state =
		tevent_req_data(reqs[0],
		struct smbXcli_req_state);
	struct smbXcli_req_state *last_state =
		tevent_req_data(reqs[num_reqs-1],
		struct smbXcli_req_state);
	struct smbXcli_req_state *state;
	size_t wct_offset;
	size_t chain_padding = 0;
	int i, iovlen;
	struct iovec *iov = NULL;
	struct iovec *this_iov;
	NTSTATUS status;
	size_t nbt_len;

	/* A "chain" of one is just a plain single request. */
	if (num_reqs == 1) {
		return smb1cli_req_writev_submit(reqs[0], first_state,
						 first_state->smb1.iov,
						 first_state->smb1.iov_count);
	}

	/*
	 * First pass: count how many iovec slots the merged PDU needs.
	 */
	iovlen = 0;
	for (i=0; i<num_reqs; i++) {
		if (!tevent_req_is_in_progress(reqs[i])) {
			return NT_STATUS_INTERNAL_ERROR;
		}

		state = tevent_req_data(reqs[i], struct smbXcli_req_state);

		/* iov[0]=NBT, iov[1]=SMB hdr, iov[2]=words, iov[3]=bytes */
		if (state->smb1.iov_count < 4) {
			return NT_STATUS_INVALID_PARAMETER_MIX;
		}

		if (i == 0) {
			/*
			 * The NBT and SMB header
			 */
			iovlen += 2;
		} else {
			/*
			 * Chain padding
			 */
			iovlen += 1;
		}

		/*
		 * words and bytes
		 */
		iovlen += state->smb1.iov_count - 2;
	}

	iov = talloc_zero_array(last_state, struct iovec, iovlen);
	if (iov == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Remember the whole chain on the first request so the response
	 * dispatcher can hand the single reply PDU to every member.
	 */
	first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
		last_state, reqs, sizeof(*reqs) * num_reqs);
	if (first_state->smb1.chained_requests == NULL) {
		TALLOC_FREE(iov);
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Second pass: patch the AndX fields and assemble the iovecs.
	 */
	wct_offset = HDR_WCT;
	this_iov = iov;

	for (i=0; i<num_reqs; i++) {
		size_t next_padding = 0;
		uint16_t *vwv;

		state = tevent_req_data(reqs[i], struct smbXcli_req_state);

		/*
		 * Every request except the last must be an AndX command
		 * with at least 2 words (the AndX command/offset pair).
		 */
		if (i < num_reqs-1) {
			if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
			    || CVAL(state->smb1.hdr, HDR_WCT) < 2) {
				TALLOC_FREE(iov);
				TALLOC_FREE(first_state->smb1.chained_requests);
				return NT_STATUS_INVALID_PARAMETER_MIX;
			}
		}

		/* advance past this request's words+bytes plus its wct byte */
		wct_offset += smbXcli_iov_len(state->smb1.iov+2,
					      state->smb1.iov_count-2) + 1;
		if ((wct_offset % 4) != 0) {
			next_padding = 4 - (wct_offset % 4);
		}
		wct_offset += next_padding;
		vwv = state->smb1.vwv;

		if (i < num_reqs-1) {
			/* point the AndX fields at the next request */
			struct smbXcli_req_state *next_state =
				tevent_req_data(reqs[i+1],
				struct smbXcli_req_state);
			SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
			SCVAL(vwv+0, 1, 0);
			SSVAL(vwv+1, 0, wct_offset);
		} else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
			/* properly end the chain */
			SCVAL(vwv+0, 0, 0xff);
			SCVAL(vwv+0, 1, 0xff);
			SSVAL(vwv+1, 0, 0);
		}

		if (i == 0) {
			/*
			 * The NBT and SMB header
			 */
			this_iov[0] = state->smb1.iov[0];
			this_iov[1] = state->smb1.iov[1];
			this_iov += 2;
		} else {
			/*
			 * This one is a bit subtle. We have to add
			 * chain_padding bytes between the requests, and we
			 * have to also include the wct field of the
			 * subsequent requests. We use the subsequent header
			 * for the padding, it contains the wct field in its
			 * last byte.
			 */
			this_iov[0].iov_len = chain_padding+1;
			this_iov[0].iov_base = (void *)&state->smb1.hdr[
				sizeof(state->smb1.hdr) - this_iov[0].iov_len];
			memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
			this_iov += 1;
		}

		/*
		 * copy the words and bytes
		 */
		memcpy(this_iov, state->smb1.iov+2,
		       sizeof(struct iovec) * (state->smb1.iov_count-2));
		this_iov += state->smb1.iov_count - 2;
		chain_padding = next_padding;
	}

	/* the merged PDU (without the NBT header) must fit into max_xmit */
	nbt_len = smbXcli_iov_len(&iov[1], iovlen-1);
	if (nbt_len > first_state->conn->smb1.max_xmit) {
		TALLOC_FREE(iov);
		TALLOC_FREE(first_state->smb1.chained_requests);
		return NT_STATUS_INVALID_PARAMETER_MIX;
	}

	status = smb1cli_req_writev_submit(reqs[0], last_state, iov, iovlen);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(iov);
		TALLOC_FREE(first_state->smb1.chained_requests);
		return status;
	}

	/*
	 * NOTE(review): the whole chain is signed as one PDU, so every
	 * member appears to share the last request's sequence number —
	 * confirm against the signing code.
	 */
	for (i=0; i < (num_reqs - 1); i++) {
		state = tevent_req_data(reqs[i], struct smbXcli_req_state);

		state->smb1.seqnum = last_state->smb1.seqnum;
	}

	return NT_STATUS_OK;
}
1556 bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
1558 return ((tevent_queue_length(conn->outgoing) != 0)
1559 || (talloc_array_length(conn->pending) != 0));