/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;
static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
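/*
 * Allocate a mid (multiplex id) queue entry for the given request header.
 * The entry tracks the outstanding request until a response is matched to
 * it by mid; by default the mid is synchronous and its callback simply
 * wakes the submitting task.
 */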
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = smb_buffer->Mid;	/* always LE */
		temp->pid = current->pid;
		temp->command = smb_buffer->Command;
		cFYI(1, "For smb_command %d", temp->command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->midState = MID_REQUEST_ALLOCATED;
	return temp;
}

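/*
 * Release a mid entry: return its response buffer to the appropriate buffer
 * pool and free the entry itself back to the mid mempool. With
 * CONFIG_CIFS_STATS2 this also logs commands that took longer than a second
 * to complete.
 */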
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->midState = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->largeBuf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

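/*
 * Unlink a mid from the server's pending queue (under GlobalMid_Lock) and
 * then free it with DeleteMidQEntry().
 */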
static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

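/*
 * Write an SMB request, described by an array of kvecs, to the server
 * socket. Short sends are handled by advancing through the iovec array,
 * and -ENOSPC/-EAGAIN is retried with an increasing backoff. If only part
 * of the frame could be sent, the session is marked for reconnect so the
 * server discards the partial SMB.
 */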
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	struct smb_hdr *smb_buffer = iov[0].iov_base;
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb: total_len %d", total_len);
	dump_smb(smb_buffer, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/* if blocking send we try 3 times, since each can block
			   for 5 seconds. For nonblocking we have to try more
			   but wait increasing amounts of time allowing time for
			   socket to clear.  The overall time we wait in either
			   case to send on the socket is about 15 seconds.
			   Similarly we wait for 15 seconds for a response from
			   the server in SendReceive[2] for the server to send
			   a response back for most types of requests (except
			   SMB Write past end of file which can be slow, and
			   blocking lock operations). NFS waits slightly longer
			   than CIFS, but this can make it take longer for
			   nonresponsive servers to be detected and 15 seconds
			   is more than enough time for modern networks to
			   send a packet.  In most cases if we fail to send
			   after the retries we will kill the socket and
			   reconnect which may clear the network problem.
			*/
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				       ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one.  We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a
	   side effect of this call. */
	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

	return rc;
}

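/* Convenience wrapper around smb_sendv() for a request held in one buffer. */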
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

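/*
 * Throttle the number of simultaneous requests on the wire: sleep on
 * request_q until the in-flight count drops below cifs_max_pending.
 * Async ops (e.g. oplock breaks) are never held up, and blocking lock
 * ops are not counted against the limit since they may block on the
 * server indefinitely.
 */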
static int wait_for_free_request(struct TCP_Server_Info *server,
				 const int long_op)
{
	if (long_op == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		atomic_inc(&server->inFlight);
		return 0;
	}

	spin_lock(&GlobalMid_Lock);
	while (1) {
		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
			spin_unlock(&GlobalMid_Lock);
			cifs_num_waiters_inc(server);
			wait_event(server->request_q,
				   atomic_read(&server->inFlight)
				     < cifs_max_pending);
			cifs_num_waiters_dec(server);
			spin_lock(&GlobalMid_Lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&GlobalMid_Lock);
				return -ENOENT;
			}

			/* can not count locking commands against total
			   as they are allowed to block on server */

			/* update # of requests on the wire to server */
			if (long_op != CIFS_BLOCKING_OP)
				atomic_inc(&server->inFlight);
			spin_unlock(&GlobalMid_Lock);
			break;
		}
	}
	return 0;
}

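/*
 * Allocate a mid for this request and queue it on the server's
 * pending_mid_q. Fails with -ENOENT/-EAGAIN if the tcp or smb session is
 * exiting, dead, or still being set up (except for session setup and
 * negotiate requests themselves).
 */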
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

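/*
 * Sleep (killable) until the mid leaves the MID_REQUEST_SUBMITTED state;
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 */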
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_callback_t *callback, void *cbdata,
		bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mutex_lock(&server->srv_mutex);
	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL) {
		mutex_unlock(&server->srv_mutex);
		atomic_dec(&server->inFlight);
		wake_up(&server->request_q);
		return -ENOMEM;
	}

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out_err;
	}

	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		struct smb_hdr *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = (char *)in_buf;
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}

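/*
 * Convert the final state of a synchronous mid into an errno and free the
 * entry, except when a response was received, in which case the caller
 * still owns the mid and its response buffer.
 */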
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
		mid->mid, mid->midState);

	spin_lock(&GlobalMid_Lock);
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
			mid->mid, mid->midState);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
		struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
		in_buf->Mid, rc);

	return rc;
}

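/*
 * Sanity-check a received response: dump the start of the frame, verify the
 * SMB signature when signing is in use, and map the SMB status code to a
 * POSIX error.
 */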
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	dump_smb(mid->resp_buf,
		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(mid->resp_buf, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

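/*
 * Send a request described by an iovec array and wait synchronously for the
 * response. On success the first iovec is pointed at the response buffer and
 * *pRespBufType reports whether it is a small or large buffer; the caller
 * owns that buffer unless CIFS_NO_RESP was set.
 */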
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	struct smb_hdr *in_buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(in_buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(in_buf);
		return rc;
	}

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}
	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(in_buf);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(in_buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	iov[0].iov_base = (char *)midQ->resp_buf;
	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
	if (midQ->largeBuf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

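/*
 * Synchronous send/receive for a single-buffer request: the response is
 * copied into the caller-supplied out_buf and its length is returned via
 * *pbytes_returned.
 */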
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

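/*
 * Send a blocking lock request and wait for the reply, allowing signals to
 * interrupt the wait. If interrupted, the lock pending on the server is
 * cancelled (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) and the interrupted system call is restarted.
 */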
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->midState == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->midState == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}