cifs: consolidate signature generating code
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;
static void
wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}
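/*
 * Allocate a mid (multiplex id) entry from the mempool and initialize it
 * from the header of the SMB that is about to be sent.
 */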
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cERROR(1, "Null TCP session in AllocMidQEntry");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        if (temp == NULL)
                return temp;
        else {
                memset(temp, 0, sizeof(struct mid_q_entry));
                temp->mid = smb_buffer->Mid;    /* always LE */
                temp->pid = current->pid;
                temp->command = smb_buffer->Command;
                cFYI(1, "For smb_command %d", temp->command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
                /* when mid allocated can be before when sent */
                temp->when_alloc = jiffies;

                /*
                 * The default is for the mid to be synchronous, so the
                 * default callback just wakes up the current task.
                 */
                temp->callback = wake_up_task;
                temp->callback_data = current;
        }

        atomic_inc(&midCount);
        temp->midState = MID_REQUEST_ALLOCATED;
        return temp;
}
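/*
 * Free a mid entry: release its response buffer (large or small) and
 * return the entry to the mempool. With CONFIG_CIFS_STATS2, responses
 * that took longer than a second are logged.
 */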
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        unsigned long now;
#endif
        midEntry->midState = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->largeBuf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /* commands taking longer than one second are indications that
           something is wrong, unless it is quite a slow link or server */
        if ((now - midEntry->when_alloc) > HZ) {
                if ((cifsFYI & CIFS_TIMER) &&
                    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
                        printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
                               midEntry->command, midEntry->mid);
                        printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        mempool_free(midEntry, cifs_mid_poolp);
}
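/* Unlink a mid from the pending queue under GlobalMid_Lock, then free it */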
static void
delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}
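/*
 * Write an array of kvecs to the server socket. Retries with increasing
 * backoff on -ENOSPC/-EAGAIN, advances the iovec across partial sends,
 * and flags the session for reconnect if only part of an SMB frame
 * could be transmitted.
 */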
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
        int rc = 0;
        int i = 0;
        struct msghdr smb_msg;
        struct smb_hdr *smb_buffer = iov[0].iov_base;
        unsigned int len = iov[0].iov_len;
        unsigned int total_len;
        int first_vec = 0;
        unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
        struct socket *ssocket = server->ssocket;

        if (ssocket == NULL)
                return -ENOTSOCK; /* BB eventually add reconnect code here */

        smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg.msg_namelen = sizeof(struct sockaddr);
        smb_msg.msg_control = NULL;
        smb_msg.msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
                smb_msg.msg_flags = MSG_NOSIGNAL;

        total_len = 0;
        for (i = 0; i < n_vec; i++)
                total_len += iov[i].iov_len;

        cFYI(1, "Sending smb: total_len %d", total_len);
        dump_smb(smb_buffer, len);

        i = 0;
        while (total_len) {
                rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
                                    n_vec - first_vec, total_len);
                if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
                        i++;
                        /* if blocking send we try 3 times, since each can block
                           for 5 seconds. For nonblocking  we have to try more
                           but wait increasing amounts of time allowing time for
                           socket to clear.  The overall time we wait in either
                           case to send on the socket is about 15 seconds.
                           Similarly we wait for 15 seconds for
                           a response from the server in SendReceive[2]
                           for the server to send a response back for
                           most types of requests (except SMB Write
                           past end of file which can be slow, and
                           blocking lock operations). NFS waits slightly longer
                           than CIFS, but this can make it take longer for
                           nonresponsive servers to be detected and 15 seconds
                           is more than enough time for modern networks to
                           send a packet.  In most cases if we fail to send
                           after the retries we will kill the socket and
                           reconnect which may clear the network problem.
                        */
                        if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
                                cERROR(1, "sends on sock %p stuck for 15 seconds",
                                    ssocket);
                                rc = -EAGAIN;
                                break;
                        }
                        msleep(1 << i);
                        continue;
                }

                if (rc < 0)
                        break;

                if (rc == total_len) {
                        total_len = 0;
                        break;
                } else if (rc > total_len) {
                        cERROR(1, "sent %d requested %d", rc, total_len);
                        break;
                }

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cERROR(1, "tcp sent no data");
                        msleep(500);
                        continue;
                }

                total_len -= rc;
                /* the line below resets i */
                for (i = first_vec; i < n_vec; i++) {
                        if (iov[i].iov_len) {
                                if (rc > iov[i].iov_len) {
                                        rc -= iov[i].iov_len;
                                        iov[i].iov_len = 0;
                                } else {
                                        iov[i].iov_base += rc;
                                        iov[i].iov_len -= rc;
                                        first_vec = i;
                                        break;
                                }
                        }
                }
                i = 0; /* in case we get ENOSPC on the next send */
        }

        if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
                cFYI(1, "partial send (%d remaining), terminating session",
                        total_len);
                /* If we have only sent part of an SMB then the next SMB
                   could be taken as the remainder of this one.  We need
                   to kill the socket so the server throws away the partial
                   SMB */
                server->tcpStatus = CifsNeedReconnect;
        }

        if (rc < 0 && rc != -EINTR)
                cERROR(1, "Error %d sending data on socket to server", rc);
        else
                rc = 0;

        /* Don't want to modify the buffer as a
           side effect of this call. */
        smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

        return rc;
}
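/* Send a single SMB buffer (header plus RFC1001 length) via smb_sendv */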
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov;

        iov.iov_base = smb_buffer;
        iov.iov_len = smb_buf_length + 4;

        return smb_sendv(server, &iov, 1);
}
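/*
 * Throttle outgoing requests: wait until fewer than cifs_max_pending
 * requests are in flight to this server. Async ops (e.g. oplock breaks)
 * are never held up, and blocking lock ops are not counted against the
 * in-flight limit.
 */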
static int wait_for_free_request(struct TCP_Server_Info *server,
                                 const int long_op)
{
        if (long_op == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                atomic_inc(&server->inFlight);
                return 0;
        }

        spin_lock(&GlobalMid_Lock);
        while (1) {
                if (atomic_read(&server->inFlight) >= cifs_max_pending) {
                        spin_unlock(&GlobalMid_Lock);
                        cifs_num_waiters_inc(server);
                        wait_event(server->request_q,
                                   atomic_read(&server->inFlight)
                                     < cifs_max_pending);
                        cifs_num_waiters_dec(server);
                        spin_lock(&GlobalMid_Lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&GlobalMid_Lock);
                                return -ENOENT;
                        }

                        /* can not count locking commands against total
                           as they are allowed to block on server */

                        /* update # of requests on the wire to server */
                        if (long_op != CIFS_BLOCKING_OP)
                                atomic_inc(&server->inFlight);
                        spin_unlock(&GlobalMid_Lock);
                        break;
                }
        }
        return 0;
}
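/*
 * Allocate a mid for this request and queue it on the server's
 * pending_mid_q, unless the TCP or SMB session is dead or still being
 * set up.
 */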
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cFYI(1, "tcp session dead - return to caller to retry");
                return -EAGAIN;
        }

        if (ses->status != CifsGood) {
                /* check if SMB session is bad because we are setting it up */
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }
        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}
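/*
 * Wait (killable) until the mid leaves MID_REQUEST_SUBMITTED state,
 * i.e. a response arrived or the transport marked the request failed.
 */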
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_killable(server->response_q,
                                    midQ->midState != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
                unsigned int nvec, mid_callback_t *callback, void *cbdata,
                bool ignore_pend)
{
        int rc;
        struct mid_q_entry *mid;
        struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

        rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
        if (rc)
                return rc;

        /* enable signing if server requires it */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mutex_lock(&server->srv_mutex);
        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL) {
                mutex_unlock(&server->srv_mutex);
                atomic_dec(&server->inFlight);
                wake_up(&server->request_q);
                return -ENOMEM;
        }

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out_err;
        }

        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->midState = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_sendv(server, iov, nvec);
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
        mutex_unlock(&server->srv_mutex);

        if (rc)
                goto out_err;

        return rc;
out_err:
        delete_mid(mid);
        atomic_dec(&server->inFlight);
        wake_up(&server->request_q);
        return rc;
}
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                struct smb_hdr *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        int resp_buf_type;

        iov[0].iov_base = (char *)in_buf;
        iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
        cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

        return rc;
}
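/*
 * Convert the final state of a synchronous mid into a return code. On
 * success (MID_RESPONSE_RECEIVED) the mid is left for the caller to
 * consume; in every other case it is freed here.
 */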
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
             mid->mid, mid->midState);

        spin_lock(&GlobalMid_Lock);
        switch (mid->midState) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
                        mid->mid, mid->midState);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}
/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
               struct mid_q_entry *mid)
{
        int rc = 0;

        /* -4 for RFC1001 length and +2 for BCC field */
        in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
        in_buf->Command = SMB_COM_NT_CANCEL;
        in_buf->WordCount = 0;
        put_bcc(0, in_buf);

        mutex_lock(&server->srv_mutex);
        rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }
        rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        mutex_unlock(&server->srv_mutex);

        cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
                in_buf->Mid, rc);

        return rc;
}
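/*
 * Check a received response: dump the start of the frame, verify the SMB
 * signature when signing is negotiated, and map the server status code
 * to a POSIX error.
 */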
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                struct kvec iov;

                iov.iov_base = mid->resp_buf;
                iov.iov_len = len;
                /* FIXME: add code to kill session */
                if (cifs_verify_signature(&iov, 1, server,
                                          mid->sequence_number + 1) != 0)
                        cERROR(1, "Unexpected SMB signature");
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}
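/*
 * Send an SMB built as an iovec array and wait synchronously for the
 * response. On success iov[0] is rewritten to point at the response
 * buffer and *pRespBufType indicates whether it is a small or large
 * buffer.
 */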
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
             const int flags)
{
        int rc = 0;
        int long_op;
        struct mid_q_entry *midQ;
        struct smb_hdr *in_buf = iov[0].iov_base;

        long_op = flags & CIFS_TIMEOUT_MASK;

        *pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_small_buf_release(in_buf);
                cERROR(1, "Null session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting) {
                cifs_small_buf_release(in_buf);
                return -ENOENT;
        }

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        rc = wait_for_free_request(ses->server, long_op);
        if (rc) {
                cifs_small_buf_release(in_buf);
                return rc;
        }

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(in_buf);
                /* Update # of requests on wire to server */
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(in_buf);
                goto out;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_sendv(ses->server, iov, n_vec);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_small_buf_release(in_buf);
                goto out;
        }

        if (long_op == CIFS_ASYNC_OP) {
                cifs_small_buf_release(in_buf);
                goto out;
        }

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_nt_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->midState == MID_REQUEST_SUBMITTED) {
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        cifs_small_buf_release(in_buf);
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        cifs_small_buf_release(in_buf);

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cFYI(1, "Bad MID state?");
                goto out;
        }

        iov[0].iov_base = (char *)midQ->resp_buf;
        iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
        if (midQ->largeBuf)
                *pRespBufType = CIFS_LARGE_BUFFER;
        else
                *pRespBufType = CIFS_SMALL_BUFFER;

        rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

        /* mark it so buf will not be freed by delete_mid */
        if ((flags & CIFS_NO_RESP) == 0)
                midQ->resp_buf = NULL;
out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
        wake_up(&ses->server->request_q);

        return rc;
}
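/*
 * Send a single-buffer SMB and wait synchronously for the response,
 * copying the complete reply into the caller-supplied out_buf and
 * returning its length in *pbytes_returned.
 */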
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int long_op)
{
        int rc = 0;
        struct mid_q_entry *midQ;

        if (ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                           be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, long_op);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if (long_op == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_nt_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->midState == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
        wake_up(&ses->server->request_q);

        return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = GetNextMid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}
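/*
 * Like SendReceive, but for blocking lock requests: the wait for the
 * response is interruptible, and if a signal arrives a cancel is sent
 * (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows locks)
 * so that the blocked lock returns and the system call can be restarted.
 */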
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;

        if (tcon == NULL || tcon->ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                           be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->midState = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->midState == MID_REQUEST_SUBMITTED) &&
                ((ses->server->tcpStatus == CifsGood) ||
                 (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_nt_cancel(ses->server, in_buf, midQ);
                        if (rc) {
                                delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_nt_cancel(ses->server, in_buf, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->midState == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}