Source: fs/cifs/transport.c from the Linux kernel (btrfs-unstable mirror tree)
blob fbb84c08e3cdfd9814b59aa098c6aae69ccd1c65
(NOTE: extraction residue — the original listing's line numbers remain embedded in each line,
and blank lines / some closing braces were lost in extraction.)
1 /*
2 * fs/cifs/transport.c
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include "cifspdu.h"
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "cifs_debug.h"
/*
 * Default mid completion callback: wake the task stored in
 * mid->callback_data (set to "current" in AllocMidQEntry).
 */
41 void
42 cifs_wake_up_task(struct mid_q_entry *mid)
44 wake_up_process(mid->callback_data);
/*
 * Allocate and initialize a mid_q_entry from cifs_mid_poolp for the given
 * request header.  The entry defaults to synchronous completion (callback
 * wakes the current task).  Returns NULL if @server is NULL or the mempool
 * allocation fails; caller frees with DeleteMidQEntry().
 */
47 struct mid_q_entry *
48 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
50 struct mid_q_entry *temp;
52 if (server == NULL) {
53 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
54 return NULL;
/* GFP_NOFS: we may be called on a writeback path; avoid fs recursion */
57 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
58 if (temp == NULL)
59 return temp;
60 else {
61 memset(temp, 0, sizeof(struct mid_q_entry));
62 temp->mid = get_mid(smb_buffer);
63 temp->pid = current->pid;
64 temp->command = cpu_to_le16(smb_buffer->Command);
65 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
66 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
67 /* when mid allocated can be before when sent */
68 temp->when_alloc = jiffies;
69 temp->server = server;
72 * The default is for the mid to be synchronous, so the
73 * default callback just wakes up the current task.
75 temp->callback = cifs_wake_up_task;
76 temp->callback_data = current;
79 atomic_inc(&midCount);
80 temp->mid_state = MID_REQUEST_ALLOCATED;
81 return temp;
/*
 * Release a mid_q_entry: free its response buffer (large or small pool),
 * drop the global mid count, and return the entry to cifs_mid_poolp.
 * With CONFIG_CIFS_STATS2, responses that took longer than one second are
 * logged (blocking lock commands excluded, since they legitimately stall).
 * Caller must have already unlinked the mid from any list.
 */
84 void
85 DeleteMidQEntry(struct mid_q_entry *midEntry)
87 #ifdef CONFIG_CIFS_STATS2
88 __le16 command = midEntry->server->vals->lock_cmd;
89 unsigned long now;
90 #endif
91 midEntry->mid_state = MID_FREE;
92 atomic_dec(&midCount);
93 if (midEntry->large_buf)
94 cifs_buf_release(midEntry->resp_buf);
95 else
96 cifs_small_buf_release(midEntry->resp_buf);
97 #ifdef CONFIG_CIFS_STATS2
98 now = jiffies;
99 /* commands taking longer than one second are indications that
100 something is wrong, unless it is quite a slow link or server */
101 if ((now - midEntry->when_alloc) > HZ) {
102 if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
103 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
104 midEntry->command, midEntry->mid);
105 pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
106 now - midEntry->when_alloc,
107 now - midEntry->when_sent,
108 now - midEntry->when_received);
111 #endif
112 mempool_free(midEntry, cifs_mid_poolp);
/*
 * Unlink a mid from the server's pending queue (under GlobalMid_Lock)
 * and free it.
 */
115 void
116 cifs_delete_mid(struct mid_q_entry *mid)
118 spin_lock(&GlobalMid_Lock);
119 list_del(&mid->qhead);
120 spin_unlock(&GlobalMid_Lock);
122 DeleteMidQEntry(mid);
126 * smb_send_kvec - send an array of kvecs to the server
127 * @server: Server to send the data to
128 * @smb_msg: Message to send
129 * @sent: amount of data sent on socket is stored here
131 * Our basic "send data to server" function. Should be called with srv_mutex
132 * held. The caller is responsible for handling the results.
134 static int
135 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
136 size_t *sent)
138 int rc = 0;
139 int retries = 0;
140 struct socket *ssocket = server->ssocket;
142 *sent = 0;
144 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
145 smb_msg->msg_namelen = sizeof(struct sockaddr);
146 smb_msg->msg_control = NULL;
147 smb_msg->msg_controllen = 0;
148 if (server->noblocksnd)
149 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
150 else
151 smb_msg->msg_flags = MSG_NOSIGNAL;
/* loop until the whole iov_iter in smb_msg has been consumed */
153 while (msg_data_left(smb_msg)) {
155 * If blocking send, we try 3 times, since each can block
156 * for 5 seconds. For nonblocking we have to try more
157 * but wait increasing amounts of time allowing time for
158 * socket to clear. The overall time we wait in either
159 * case to send on the socket is about 15 seconds.
160 * Similarly we wait for 15 seconds for a response from
161 * the server in SendReceive[2] for the server to send
162 * a response back for most types of requests (except
163 * SMB Write past end of file which can be slow, and
164 * blocking lock operations). NFS waits slightly longer
165 * than CIFS, but this can make it take longer for
166 * nonresponsive servers to be detected and 15 seconds
167 * is more than enough time for modern networks to
168 * send a packet. In most cases if we fail to send
169 * after the retries we will kill the socket and
170 * reconnect which may clear the network problem.
172 rc = sock_sendmsg(ssocket, smb_msg);
173 if (rc == -EAGAIN) {
174 retries++;
175 if (retries >= 14 ||
176 (!server->noblocksnd && (retries > 2))) {
177 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
178 ssocket);
179 return -EAGAIN;
/* exponential backoff: 2, 4, 8, ... ms between retries */
181 msleep(1 << retries);
182 continue;
185 if (rc < 0)
186 return rc;
188 if (rc == 0) {
189 /* should never happen, letting socket clear before
190 retrying is our only obvious option here */
191 cifs_dbg(VFS, "tcp sent no data\n");
192 msleep(500);
193 continue;
196 /* send was at least partially successful */
197 *sent += rc;
198 retries = 0; /* in case we get ENOSPC on the next send */
200 return 0;
/*
 * Total on-the-wire length of an smb_rqst: the sum of all kvec lengths
 * plus the page array, where every page but the last contributes
 * rq_pagesz bytes and the last contributes rq_tailsz bytes.
 */
203 static unsigned long
204 rqst_len(struct smb_rqst *rqst)
206 unsigned int i;
207 struct kvec *iov = rqst->rq_iov;
208 unsigned long buflen = 0;
210 /* total up iov array first */
211 for (i = 0; i < rqst->rq_nvec; i++)
212 buflen += iov[i].iov_len;
214 /* add in the page array if there is one */
215 if (rqst->rq_npages) {
216 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
217 buflen += rqst->rq_tailsz;
220 return buflen;
/*
 * Send a complete smb_rqst (kvec array followed by page array) on the
 * server socket.  The socket is corked for the duration so the request
 * goes out as few TCP segments as possible.  The total length is sanity
 * checked against the RFC1002 length in the first kvec; on a partial
 * send the session is marked CifsNeedReconnect since the server could
 * otherwise misparse the next request as the remainder of this one.
 * Must be called with srv_mutex held (see smb_send_kvec).
 */
223 static int
224 smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
226 int rc;
227 struct kvec *iov = rqst->rq_iov;
228 int n_vec = rqst->rq_nvec;
229 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
230 unsigned long send_length;
231 unsigned int i;
232 size_t total_len = 0, sent, size;
233 struct socket *ssocket = server->ssocket;
234 struct msghdr smb_msg;
235 int val = 1;
237 if (ssocket == NULL)
238 return -ENOTSOCK;
240 /* sanity check send length */
241 send_length = rqst_len(rqst);
/* +4 for the RFC1002 length field itself, which precedes the SMB */
242 if (send_length != smb_buf_length + 4) {
243 WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
244 send_length, smb_buf_length);
245 return -EIO;
248 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
249 dump_smb(iov[0].iov_base, iov[0].iov_len);
251 /* cork the socket */
252 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
253 (char *)&val, sizeof(val));
255 size = 0;
256 for (i = 0; i < n_vec; i++)
257 size += iov[i].iov_len;
259 iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
261 rc = smb_send_kvec(server, &smb_msg, &sent);
262 if (rc < 0)
263 goto uncork;
265 total_len += sent;
267 /* now walk the page array and send each page in it */
268 for (i = 0; i < rqst->rq_npages; i++) {
269 size_t len = i == rqst->rq_npages - 1
270 ? rqst->rq_tailsz
271 : rqst->rq_pagesz;
272 struct bio_vec bvec = {
273 .bv_page = rqst->rq_pages[i],
274 .bv_len = len
276 iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
277 &bvec, 1, len);
278 rc = smb_send_kvec(server, &smb_msg, &sent);
279 if (rc < 0)
280 break;
282 total_len += sent;
285 uncork:
286 /* uncork it */
287 val = 0;
288 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
289 (char *)&val, sizeof(val));
291 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
292 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
293 smb_buf_length + 4, total_len);
295 * If we have only sent part of an SMB then the next SMB could
296 * be taken as the remainder of this one. We need to kill the
297 * socket so the server throws away the partial SMB
299 server->tcpStatus = CifsNeedReconnect;
/* -EINTR is expected when interrupted; don't log it as a server error */
302 if (rc < 0 && rc != -EINTR)
303 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
304 rc);
305 else
306 rc = 0;
308 return rc;
/*
 * Convenience wrapper: send a bare kvec array (no page array) by
 * packaging it as an smb_rqst for smb_send_rqst().
 */
311 static int
312 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
314 struct smb_rqst rqst = { .rq_iov = iov,
315 .rq_nvec = n_vec };
317 return smb_send_rqst(server, &rqst);
/*
 * Send a single contiguous SMB buffer.  The +4 covers the RFC1002
 * length field that precedes the SMB header in the buffer.
 */
321 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
322 unsigned int smb_buf_length)
324 struct kvec iov;
326 iov.iov_base = smb_buffer;
327 iov.iov_len = smb_buf_length + 4;
329 return smb_sendv(server, &iov, 1);
/*
 * Credit-based flow control: block (killably) until a send credit is
 * available, then consume one and bump in_flight.  CIFS_ASYNC_OP
 * requests (e.g. oplock break responses) must never be held up and take
 * a credit immediately; CIFS_BLOCKING_OP requests are not counted at
 * all since they may legitimately block on the server indefinitely.
 * Returns 0, a -ERESTARTSYS-style code from the killable wait, or
 * -ENOENT if the tcp session is exiting.
 */
332 static int
333 wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
334 int *credits)
336 int rc;
338 spin_lock(&server->req_lock);
339 if (timeout == CIFS_ASYNC_OP) {
340 /* oplock breaks must not be held up */
341 server->in_flight++;
342 *credits -= 1;
343 spin_unlock(&server->req_lock);
344 return 0;
347 while (1) {
348 if (*credits <= 0) {
349 spin_unlock(&server->req_lock);
350 cifs_num_waiters_inc(server);
351 rc = wait_event_killable(server->request_q,
352 has_credits(server, credits));
353 cifs_num_waiters_dec(server);
354 if (rc)
355 return rc;
/* re-take the lock and re-check; credits may be gone again */
356 spin_lock(&server->req_lock);
357 } else {
358 if (server->tcpStatus == CifsExiting) {
359 spin_unlock(&server->req_lock);
360 return -ENOENT;
364 * Can not count locking commands against total
365 * as they are allowed to block on server.
368 /* update # of requests on the wire to server */
369 if (timeout != CIFS_BLOCKING_OP) {
370 *credits -= 1;
371 server->in_flight++;
373 spin_unlock(&server->req_lock);
374 break;
377 return 0;
/*
 * Look up the credit counter for this operation type and wait on it.
 * Echoes are special-cased: if no credit is free an echo is already in
 * flight, so there is no point queuing another — fail fast with -EAGAIN.
 */
380 static int
381 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
382 const int optype)
384 int *val;
386 val = server->ops->get_credits_field(server, optype);
387 /* Since an echo is already inflight, no need to wait to send another */
388 if (*val <= 0 && optype == CIFS_ECHO_OP)
389 return -EAGAIN;
390 return wait_for_free_credits(server, timeout, val);
/*
 * SMB1 has no MTU credit accounting: grant the full requested size and
 * report zero credits consumed.  (SMB2 provides its own version.)
 */
394 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
395 unsigned int *num, unsigned int *credits)
397 *num = size;
398 *credits = 0;
399 return 0;
/*
 * Validate session/tcp state, allocate a mid for @in_buf and queue it on
 * the server's pending_mid_q.  While a session is still being set up
 * (CifsNew) only SESSION_SETUP and NEGOTIATE may pass; while it is being
 * torn down (status CifsExiting) only LOGOFF may pass.  Returns 0 with
 * *ppmidQ set, or -ENOENT/-EAGAIN/-ENOMEM.
 */
402 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
403 struct mid_q_entry **ppmidQ)
405 if (ses->server->tcpStatus == CifsExiting) {
406 return -ENOENT;
409 if (ses->server->tcpStatus == CifsNeedReconnect) {
410 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
411 return -EAGAIN;
414 if (ses->status == CifsNew) {
415 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
416 (in_buf->Command != SMB_COM_NEGOTIATE))
417 return -EAGAIN;
418 /* else ok - we are setting up session */
421 if (ses->status == CifsExiting) {
422 /* check if SMB session is bad because we are setting it up */
423 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
424 return -EAGAIN;
425 /* else ok - we are shutting down session */
428 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
429 if (*ppmidQ == NULL)
430 return -ENOMEM;
431 spin_lock(&GlobalMid_Lock);
432 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
433 spin_unlock(&GlobalMid_Lock);
434 return 0;
/*
 * Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED,
 * i.e. a response arrived or the connection state changed it.  A fatal
 * signal is reported as -ERESTARTSYS.
 */
437 static int
438 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
440 int error;
442 error = wait_event_freezekillable_unsafe(server->response_q,
443 midQ->mid_state != MID_REQUEST_SUBMITTED);
444 if (error < 0)
445 return -ERESTARTSYS;
447 return 0;
/*
 * SMB1 setup for an async request: flag the header for signing when the
 * server requires it, allocate a mid, and sign the request (recording
 * the sequence number in the mid).  Returns the mid or an ERR_PTR.
 */
450 struct mid_q_entry *
451 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
453 int rc;
454 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
455 struct mid_q_entry *mid;
457 /* enable signing if server requires it */
458 if (server->sign)
459 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
461 mid = AllocMidQEntry(hdr, server);
462 if (mid == NULL)
463 return ERR_PTR(-ENOMEM);
465 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
466 if (rc) {
467 DeleteMidQEntry(mid);
468 return ERR_PTR(rc);
471 return mid;
475 * Send a SMB request and set the callback function in the mid to handle
476 * the result. Caller is responsible for dealing with timeouts.
 *
 * Unless CIFS_HAS_CREDITS says the caller already holds a credit, one is
 * acquired here and returned (with a wakeup) on any failure path.  The
 * request is set up, queued on pending_mid_q, and sent under srv_mutex;
 * on send failure the signing sequence number is rolled back by 2 (one
 * for the request, one for the never-to-arrive response) and the mid is
 * deleted.
479 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
480 mid_receive_t *receive, mid_callback_t *callback,
481 void *cbdata, const int flags)
483 int rc, timeout, optype;
484 struct mid_q_entry *mid;
485 unsigned int credits = 0;
487 timeout = flags & CIFS_TIMEOUT_MASK;
488 optype = flags & CIFS_OP_MASK;
490 if ((flags & CIFS_HAS_CREDITS) == 0) {
491 rc = wait_for_free_request(server, timeout, optype);
492 if (rc)
493 return rc;
494 credits = 1;
497 mutex_lock(&server->srv_mutex);
498 mid = server->ops->setup_async_request(server, rqst);
499 if (IS_ERR(mid)) {
500 mutex_unlock(&server->srv_mutex);
501 add_credits_and_wake_if(server, credits, optype);
502 return PTR_ERR(mid);
505 mid->receive = receive;
506 mid->callback = callback;
507 mid->callback_data = cbdata;
508 mid->mid_state = MID_REQUEST_SUBMITTED;
510 /* put it on the pending_mid_q */
511 spin_lock(&GlobalMid_Lock);
512 list_add_tail(&mid->qhead, &server->pending_mid_q);
513 spin_unlock(&GlobalMid_Lock);
516 cifs_in_send_inc(server);
517 rc = smb_send_rqst(server, rqst);
518 cifs_in_send_dec(server);
519 cifs_save_when_sent(mid);
521 if (rc < 0) {
522 server->sequence_number -= 2;
523 cifs_delete_mid(mid);
526 mutex_unlock(&server->srv_mutex);
528 if (rc == 0)
529 return 0;
531 add_credits_and_wake_if(server, credits, optype);
532 return rc;
537 * Send an SMB Request. No response info (other than return code)
538 * needs to be parsed.
540 * flags indicate the type of request buffer and how long to wait
541 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 * Thin wrapper over SendReceive2 with CIFS_NO_RESP forced on, so the
 * response buffer is discarded by the lower layer.
545 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
546 char *in_buf, int flags)
548 int rc;
549 struct kvec iov[1];
550 int resp_buf_type;
552 iov[0].iov_base = in_buf;
553 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
554 flags |= CIFS_NO_RESP;
555 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
556 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
558 return rc;
/*
 * Translate a completed mid's state into an errno and dispose of the mid.
 * MID_RESPONSE_RECEIVED returns 0 and leaves the mid alone (caller still
 * needs resp_buf); every other state frees the mid (under srv_mutex, as
 * DeleteMidQEntry requires for the signing sequence consistency) after
 * mapping: RETRY_NEEDED -> -EAGAIN, RESPONSE_MALFORMED -> -EIO,
 * SHUTDOWN -> -EHOSTDOWN.  An unexpected state is logged, the mid is
 * unlinked, and -EIO returned.
 */
561 static int
562 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
564 int rc = 0;
566 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
567 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
569 spin_lock(&GlobalMid_Lock);
570 switch (mid->mid_state) {
571 case MID_RESPONSE_RECEIVED:
572 spin_unlock(&GlobalMid_Lock);
573 return rc;
574 case MID_RETRY_NEEDED:
575 rc = -EAGAIN;
576 break;
577 case MID_RESPONSE_MALFORMED:
578 rc = -EIO;
579 break;
580 case MID_SHUTDOWN:
581 rc = -EHOSTDOWN;
582 break;
583 default:
584 list_del_init(&mid->qhead);
585 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
586 __func__, mid->mid, mid->mid_state);
587 rc = -EIO;
589 spin_unlock(&GlobalMid_Lock);
591 mutex_lock(&server->srv_mutex);
592 DeleteMidQEntry(mid);
593 mutex_unlock(&server->srv_mutex);
594 return rc;
/*
 * Ask the dialect to cancel an outstanding request, if it supports
 * cancellation; otherwise succeed silently.
 */
597 static inline int
598 send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
600 return server->ops->send_cancel ?
601 server->ops->send_cancel(server, buf, mid) : 0;
/*
 * Post-receive validation of an SMB1 response: dump (up to 92 bytes of)
 * the frame, verify the signature when signing is active (failure is
 * logged but not yet fatal — see FIXME), then map the SMB status code
 * to a POSIX error, optionally logging it per @log_error.
 */
605 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
606 bool log_error)
608 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
610 dump_smb(mid->resp_buf, min_t(u32, 92, len));
612 /* convert the length into a more usable form */
613 if (server->sign) {
614 struct kvec iov;
615 int rc = 0;
616 struct smb_rqst rqst = { .rq_iov = &iov,
617 .rq_nvec = 1 };
619 iov.iov_base = mid->resp_buf;
620 iov.iov_len = len;
621 /* FIXME: add code to kill session */
622 rc = cifs_verify_signature(&rqst, server,
623 mid->sequence_number);
624 if (rc)
625 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
626 rc);
629 /* BB special case reconnect tid and uid here? */
630 return map_smb_to_linux_error(mid->resp_buf, log_error);
/*
 * SMB1 synchronous-request setup: allocate/queue a mid via allocate_mid()
 * and sign the request.  On signing failure the mid is unlinked and
 * freed.  Returns the mid or an ERR_PTR.
 */
633 struct mid_q_entry *
634 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
636 int rc;
637 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
638 struct mid_q_entry *mid;
640 rc = allocate_mid(ses, hdr, &mid);
641 if (rc)
642 return ERR_PTR(rc);
643 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
644 if (rc) {
645 cifs_delete_mid(mid);
646 return ERR_PTR(rc);
648 return mid;
/*
 * Synchronous send/receive of an SMB built from a kvec array.  Acquires
 * a credit, sets up and sends the request under srv_mutex, then waits
 * for the response.  On success the response buffer replaces iov[0] and
 * *resp_buf_type reports which pool it came from; unless CIFS_NO_RESP
 * was passed, resp_buf is detached from the mid so cifs_delete_mid()
 * will not free it and ownership passes to the caller.  Note that the
 * request buffer (iov[0].iov_base on entry) is released on every path.
 */
652 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
653 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
654 const int flags)
656 int rc = 0;
657 int timeout, optype;
658 struct mid_q_entry *midQ;
659 char *buf = iov[0].iov_base;
660 unsigned int credits = 1;
661 struct smb_rqst rqst = { .rq_iov = iov,
662 .rq_nvec = n_vec };
664 timeout = flags & CIFS_TIMEOUT_MASK;
665 optype = flags & CIFS_OP_MASK;
667 *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */
669 if ((ses == NULL) || (ses->server == NULL)) {
670 cifs_small_buf_release(buf);
671 cifs_dbg(VFS, "Null session\n");
672 return -EIO;
675 if (ses->server->tcpStatus == CifsExiting) {
676 cifs_small_buf_release(buf);
677 return -ENOENT;
681 * Ensure that we do not send more than 50 overlapping requests
682 * to the same server. We may make this configurable later or
683 * use ses->maxReq.
686 rc = wait_for_free_request(ses->server, timeout, optype);
687 if (rc) {
688 cifs_small_buf_release(buf);
689 return rc;
693 * Make sure that we sign in the same order that we send on this socket
694 * and avoid races inside tcp sendmsg code that could cause corruption
695 * of smb data.
698 mutex_lock(&ses->server->srv_mutex);
700 midQ = ses->server->ops->setup_request(ses, &rqst);
701 if (IS_ERR(midQ)) {
702 mutex_unlock(&ses->server->srv_mutex);
703 cifs_small_buf_release(buf);
704 /* Update # of requests on wire to server */
705 add_credits(ses->server, 1, optype);
706 return PTR_ERR(midQ);
709 midQ->mid_state = MID_REQUEST_SUBMITTED;
710 cifs_in_send_inc(ses->server);
711 rc = smb_sendv(ses->server, iov, n_vec);
712 cifs_in_send_dec(ses->server);
713 cifs_save_when_sent(midQ);
/* roll back seq number by 2: one for request, one for the lost response */
715 if (rc < 0)
716 ses->server->sequence_number -= 2;
717 mutex_unlock(&ses->server->srv_mutex);
719 if (rc < 0) {
720 cifs_small_buf_release(buf);
721 goto out;
724 if (timeout == CIFS_ASYNC_OP) {
725 cifs_small_buf_release(buf);
726 goto out;
729 rc = wait_for_response(ses->server, midQ);
730 if (rc != 0) {
731 send_cancel(ses->server, buf, midQ);
732 spin_lock(&GlobalMid_Lock);
733 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* let the demultiplex thread free the mid when the response arrives */
734 midQ->callback = DeleteMidQEntry;
735 spin_unlock(&GlobalMid_Lock);
736 cifs_small_buf_release(buf);
737 add_credits(ses->server, 1, optype);
738 return rc;
740 spin_unlock(&GlobalMid_Lock);
743 cifs_small_buf_release(buf);
745 rc = cifs_sync_mid_result(midQ, ses->server);
746 if (rc != 0) {
747 add_credits(ses->server, 1, optype);
748 return rc;
751 if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
752 rc = -EIO;
753 cifs_dbg(FYI, "Bad MID state?\n");
754 goto out;
757 buf = (char *)midQ->resp_buf;
758 iov[0].iov_base = buf;
759 iov[0].iov_len = get_rfc1002_length(buf) + 4;
760 if (midQ->large_buf)
761 *resp_buf_type = CIFS_LARGE_BUFFER;
762 else
763 *resp_buf_type = CIFS_SMALL_BUFFER;
765 credits = ses->server->ops->get_credits(midQ);
767 rc = ses->server->ops->check_receive(midQ, ses->server,
768 flags & CIFS_LOG_ERROR);
770 /* mark it so buf will not be freed by cifs_delete_mid */
771 if ((flags & CIFS_NO_RESP) == 0)
772 midQ->resp_buf = NULL;
773 out:
774 cifs_delete_mid(midQ);
775 add_credits(ses->server, credits, optype);
777 return rc;
/*
 * Legacy synchronous send/receive for a pre-built SMB in @in_buf.  The
 * response is copied into the caller-supplied @out_buf and its length
 * (excluding the 4-byte RFC1002 header) reported via *pbytes_returned.
 * The request length is bounds-checked against the maximum frame size
 * before a credit is taken.
 */
781 SendReceive(const unsigned int xid, struct cifs_ses *ses,
782 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
783 int *pbytes_returned, const int timeout)
785 int rc = 0;
786 struct mid_q_entry *midQ;
788 if (ses == NULL) {
789 cifs_dbg(VFS, "Null smb session\n");
790 return -EIO;
792 if (ses->server == NULL) {
793 cifs_dbg(VFS, "Null tcp session\n");
794 return -EIO;
797 if (ses->server->tcpStatus == CifsExiting)
798 return -ENOENT;
800 /* Ensure that we do not send more than 50 overlapping requests
801 to the same server. We may make this configurable later or
802 use ses->maxReq */
804 if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
805 MAX_CIFS_HDR_SIZE - 4) {
806 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
807 be32_to_cpu(in_buf->smb_buf_length));
808 return -EIO;
811 rc = wait_for_free_request(ses->server, timeout, 0);
812 if (rc)
813 return rc;
815 /* make sure that we sign in the same order that we send on this socket
816 and avoid races inside tcp sendmsg code that could cause corruption
817 of smb data */
819 mutex_lock(&ses->server->srv_mutex);
821 rc = allocate_mid(ses, in_buf, &midQ);
822 if (rc) {
823 mutex_unlock(&ses->server->srv_mutex);
824 /* Update # of requests on wire to server */
825 add_credits(ses->server, 1, 0);
826 return rc;
829 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
830 if (rc) {
831 mutex_unlock(&ses->server->srv_mutex);
832 goto out;
835 midQ->mid_state = MID_REQUEST_SUBMITTED;
837 cifs_in_send_inc(ses->server);
838 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
839 cifs_in_send_dec(ses->server);
840 cifs_save_when_sent(midQ);
/* roll back seq number by 2: one for request, one for the lost response */
842 if (rc < 0)
843 ses->server->sequence_number -= 2;
845 mutex_unlock(&ses->server->srv_mutex);
847 if (rc < 0)
848 goto out;
850 if (timeout == CIFS_ASYNC_OP)
851 goto out;
853 rc = wait_for_response(ses->server, midQ);
854 if (rc != 0) {
855 send_cancel(ses->server, in_buf, midQ);
856 spin_lock(&GlobalMid_Lock);
857 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
858 /* no longer considered to be "in-flight" */
859 midQ->callback = DeleteMidQEntry;
860 spin_unlock(&GlobalMid_Lock);
861 add_credits(ses->server, 1, 0);
862 return rc;
864 spin_unlock(&GlobalMid_Lock);
867 rc = cifs_sync_mid_result(midQ, ses->server);
868 if (rc != 0) {
869 add_credits(ses->server, 1, 0);
870 return rc;
873 if (!midQ->resp_buf || !out_buf ||
874 midQ->mid_state != MID_RESPONSE_RECEIVED) {
875 rc = -EIO;
876 cifs_dbg(VFS, "Bad MID state?\n");
877 goto out;
880 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
881 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
882 rc = cifs_check_receive(midQ, ses->server, 0);
883 out:
884 cifs_delete_mid(midQ);
885 add_credits(ses->server, 1, 0);
887 return rc;
890 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
891 blocking lock to return. */
893 static int
894 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
895 struct smb_hdr *in_buf,
896 struct smb_hdr *out_buf)
898 int bytes_returned;
899 struct cifs_ses *ses = tcon->ses;
900 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
902 /* We just modify the current in_buf to change
903 the type of lock from LOCKING_ANDX_SHARED_LOCK
904 or LOCKING_ANDX_EXCLUSIVE_LOCK to
905 LOCKING_ANDX_CANCEL_LOCK. */
907 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
908 pSMB->Timeout = 0;
/* the cancel is a new request and needs its own mid */
909 pSMB->hdr.Mid = get_next_mid(ses->server);
911 return SendReceive(xid, ses, in_buf, out_buf,
912 &bytes_returned, 0);
/*
 * Synchronous send/receive for blocking lock requests.  Unlike
 * SendReceive(), the response wait is interruptible (not just killable):
 * if a signal arrives while the lock is still outstanding, a cancel is
 * sent — an NT_CANCEL for POSIX (TRANSACTION2) locks, or a
 * LOCKINGX_CANCEL_LOCK via send_lock_cancel() for Windows locks — and we
 * then wait for the server's reply to the original request.  If the lock
 * was actually acquired despite the signal (rc == -EACCES after a
 * restart), -ERESTARTSYS is returned so the system call is restarted.
 */
916 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
917 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
918 int *pbytes_returned)
920 int rc = 0;
921 int rstart = 0;
922 struct mid_q_entry *midQ;
923 struct cifs_ses *ses;
925 if (tcon == NULL || tcon->ses == NULL) {
926 cifs_dbg(VFS, "Null smb session\n");
927 return -EIO;
929 ses = tcon->ses;
931 if (ses->server == NULL) {
932 cifs_dbg(VFS, "Null tcp session\n");
933 return -EIO;
936 if (ses->server->tcpStatus == CifsExiting)
937 return -ENOENT;
939 /* Ensure that we do not send more than 50 overlapping requests
940 to the same server. We may make this configurable later or
941 use ses->maxReq */
943 if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
944 MAX_CIFS_HDR_SIZE - 4) {
945 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
946 be32_to_cpu(in_buf->smb_buf_length));
947 return -EIO;
/* CIFS_BLOCKING_OP: blocking locks are not counted against credits */
950 rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
951 if (rc)
952 return rc;
954 /* make sure that we sign in the same order that we send on this socket
955 and avoid races inside tcp sendmsg code that could cause corruption
956 of smb data */
958 mutex_lock(&ses->server->srv_mutex);
960 rc = allocate_mid(ses, in_buf, &midQ);
961 if (rc) {
962 mutex_unlock(&ses->server->srv_mutex);
963 return rc;
966 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
967 if (rc) {
968 cifs_delete_mid(midQ);
969 mutex_unlock(&ses->server->srv_mutex);
970 return rc;
973 midQ->mid_state = MID_REQUEST_SUBMITTED;
974 cifs_in_send_inc(ses->server);
975 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
976 cifs_in_send_dec(ses->server);
977 cifs_save_when_sent(midQ);
/* roll back seq number by 2: one for request, one for the lost response */
979 if (rc < 0)
980 ses->server->sequence_number -= 2;
982 mutex_unlock(&ses->server->srv_mutex);
984 if (rc < 0) {
985 cifs_delete_mid(midQ);
986 return rc;
989 /* Wait for a reply - allow signals to interrupt. */
990 rc = wait_event_interruptible(ses->server->response_q,
991 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
992 ((ses->server->tcpStatus != CifsGood) &&
993 (ses->server->tcpStatus != CifsNew)));
995 /* Were we interrupted by a signal ? */
996 if ((rc == -ERESTARTSYS) &&
997 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
998 ((ses->server->tcpStatus == CifsGood) ||
999 (ses->server->tcpStatus == CifsNew))) {
1001 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1002 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1003 blocking lock to return. */
1004 rc = send_cancel(ses->server, in_buf, midQ);
1005 if (rc) {
1006 cifs_delete_mid(midQ);
1007 return rc;
1009 } else {
1010 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1011 to cause the blocking lock to return. */
1013 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1015 /* If we get -ENOLCK back the lock may have
1016 already been removed. Don't exit in this case. */
1017 if (rc && rc != -ENOLCK) {
1018 cifs_delete_mid(midQ);
1019 return rc;
1023 rc = wait_for_response(ses->server, midQ);
1024 if (rc) {
1025 send_cancel(ses->server, in_buf, midQ);
1026 spin_lock(&GlobalMid_Lock);
1027 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1028 /* no longer considered to be "in-flight" */
1029 midQ->callback = DeleteMidQEntry;
1030 spin_unlock(&GlobalMid_Lock);
1031 return rc;
1033 spin_unlock(&GlobalMid_Lock);
1036 /* We got the response - restart system call. */
1037 rstart = 1;
1040 rc = cifs_sync_mid_result(midQ, ses->server);
1041 if (rc != 0)
1042 return rc;
1044 /* rcvd frame is ok */
1045 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1046 rc = -EIO;
1047 cifs_dbg(VFS, "Bad MID state?\n");
1048 goto out;
1051 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1052 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1053 rc = cifs_check_receive(midQ, ses->server, 0);
1054 out:
1055 cifs_delete_mid(midQ);
1056 if (rstart && rc == -EACCES)
1057 return -ERESTARTSYS;
1058 return rc;