cifs: consolidate SendReceive response checks
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
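
/*
 * Illustrative example (not called anywhere in this file): how a typical
 * open(2) request maps through the two helpers above. The values shown
 * follow directly from the mappings as written; this is a sketch, not
 * driver code.
 *
 *	unsigned int flags = O_RDWR | O_CREAT | O_EXCL;
 *	int access = cifs_convert_flags(flags); // GENERIC_READ | GENERIC_WRITE
 *	int disp = cifs_get_disposition(flags); // FILE_CREATE
 */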
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
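
/*
 * Note on the contract above: a presp_data->Type of -1 means the server
 * returned no inode data with the open response, so the caller is expected
 * to follow up with a QPathInfo, as the inline comment says; pinode may be
 * NULL when the caller only wants the open itself.
 */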
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if this is a readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
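
/*
 * Lifetime sketch (illustrative): every cifsFileInfo starts with count == 1
 * from cifs_new_fileinfo(). Code that needs to use the handle outside
 * cifs_file_list_lock takes an extra reference and drops it when done:
 *
 *	cifsFileInfo_get(open_file);
 *	... use open_file->netfid ...
 *	cifsFileInfo_put(open_file); // may close the handle on the server
 */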
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least a level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

	/* can not grab rename sem here because various ops, including those
	   that already have the rename sem, can end up causing writepage to
	   get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
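
/*
 * The records stored above are consumed by the unlock path in cifs_lock()
 * below: any stored lock fully contained in an unlock range is released on
 * the server individually and then freed. This bookkeeping is only needed
 * for Windows (non-POSIX) locking, where the client must remember exactly
 * what it locked.
 */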
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */,
					 0 /* numLock */, lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					  "range %d during test of lock", rc);
			rc = 0;
		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
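
/*
 * Worked example of the F_GETLK probe above for a non-POSIX server: to test
 * a range, the client actually requests the lock. If the lock is granted,
 * the range was free, so it is immediately unlocked again and fl_type is
 * reported as F_UNLCK; if an exclusive attempt fails, a follow-up shared
 * probe distinguishes a read-locked range (F_RDLCK) from a fully locked
 * one (F_WRLCK).
 */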
/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
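
/*
 * Example: with server_eof == 4096, a 100-byte write at offset 8000 ends at
 * 8100, so server_eof becomes 8100; the same write at offset 0 ends at 100
 * and leaves server_eof untouched.
 */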
static ssize_t cifs_write(struct cifsFileInfo *open_file,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
					   *poffset, &bytes_written, iov, 1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
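
/*
 * Sizing sketch for the loop above (illustrative numbers): each
 * CIFSSMBWrite2 call sends at most wsize bytes, so with a negotiated wsize
 * of 57344 a 100000-byte cifs_write() goes out as two SMBs of 57344 and
 * 42656 bytes, with iov[0] left empty for the SMB header that the
 * transport fills in.
 */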
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find a usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
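
/*
 * Typical use of the finder above (illustrative): writeback paths that have
 * no struct file of their own borrow any open writable handle, e.g.:
 *
 *	open_file = find_writable_file(CIFS_I(mapping->host), false);
 *	if (open_file) {
 *		... write via open_file->netfid ...
 *		cifsFileInfo_put(open_file);
 *	}
 *
 * cifs_partialpagewrite() below follows exactly this pattern.
 */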
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, write_data,
					   to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
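
/*
 * cifs_writepages() below clusters contiguous dirty pages into a single
 * wsize-bounded WriteAndX: it collects consecutive pages from the pagevec
 * into iov[1..n] (iov[0] is reserved for the SMB header), stops at the
 * first hole or page it cannot lock, and falls back to generic_writepages()
 * when wsize is smaller than a page or no writable handle exists.
 */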
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * if there's no open file, then this is likely to fail too,
	 * but it'll at least handle the return. Maybe it should be
	 * a BUG() instead?
	 */
	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (!open_file) {
		kfree(iov);
		return generic_writepages(mapping, wbc);
	}

	tcon = tlink_tcon(open_file->tlink);
	cifsFileInfo_put(open_file);

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this? pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
retry_write:
			open_file = find_writable_file(CIFS_I(mapping->host),
							false);
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   0);
				cifsFileInfo_put(open_file);
			}

			cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);

			/*
			 * For now, treat a short write as if nothing got
			 * written. A zero length write however indicates
			 * ENOSPC or EFBIG. We have no way to know which
			 * though, so call it ENOSPC for now. EFBIG would
			 * get translated to AS_EIO anyway.
			 *
			 * FIXME: make it take into account the data that did
			 * get written
			 */
			if (rc == 0) {
				if (bytes_written == 0)
					rc = -ENOSPC;
				else if (bytes_written < bytes_to_write)
					rc = -EAGAIN;
			}

			/* retry on data-integrity flush */
			if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
				goto retry_write;

			/* fix the stats and EOF */
			if (bytes_written > 0) {
				cifs_stats_bytes_written(tcon, bytes_written);
				cifs_update_eof(cifsi, offset, bytes_written);
			}

			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* on retryable write error, redirty page */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc, page);
				else if (rc != 0)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}

			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
			else
				rc = 0;

			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file->private_data, page_data + offset,
				copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
int cifs_strict_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	return rc;
}
int cifs_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	return rc;
}
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			goto error;
		}
	}

	return rc;

error:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	return rc;
}
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = clen / PAGE_CACHE_SIZE;
	if (clen % PAGE_CACHE_SIZE)
		num_pages++;

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
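
/*
 * Example (assuming 4K pages): with wsize == 57344 and len == 200000, clen
 * is capped at 57344, which is exactly 14 pages, so get_numpages() returns
 * 14 and sets *cur_len to 57344; a final 4000-byte tail would round up to
 * one page.
 */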
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned int written;
	unsigned long num_pages, npages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	struct kvec *to_send;
	struct page **pages;
	struct iov_iter it;
	struct inode *inode;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *pTcon;
	struct cifs_sb_info *cifs_sb;
	int xid, rc;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);

	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
	if (!to_send) {
		kfree(pages);
		return -ENOMEM;
	}

	rc = cifs_write_allocate_pages(pages, num_pages);
	if (rc) {
		kfree(pages);
		kfree(to_send);
		return rc;
	}

	xid = GetXid();
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);
	inode = file->f_path.dentry->d_inode;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	npages = num_pages;

	do {
		size_t save_len = cur_len;
		for (i = 0; i < npages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
			copied = iov_iter_copy_from_user(pages[i], &it, 0,
							 copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			to_send[i+1].iov_base = kmap(pages[i]);
			to_send[i+1].iov_len = copied;
		}

		cur_len = save_len - cur_len;

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
					   cur_len, *poffset, &written,
					   to_send, npages, 0);
		} while (rc == -EAGAIN);

		for (i = 0; i < npages; i++)
			kunmap(pages[i]);

		if (written) {
			len -= written;
			total_written += written;
			cifs_update_eof(CIFS_I(inode), *poffset, written);
			*poffset += written;
		} else if (rc < 0) {
			if (!total_written)
				total_written = rc;
			break;
		}

		/* get length and number of kvecs of the next write */
		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
	} while (len > 0);

	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}

	cifs_stats_bytes_written(pTcon, total_written);
	mark_inode_dirty_sync(inode);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(to_send);
	kfree(pages);
	FreeXid(xid);
	return total_written;
}
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the path when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for
	 * constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}
ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from pos to pos+len-1 rather than flush all affected pages, because
	 * that may cause an error with mandatory locks on those pages but not
	 * on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	char *read_data;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
					 cur_len, *poffset, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}
ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime change - so we can't make a decision about inode
	 * invalidation. We can also fail reading pages if there are
	 * mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For Windows ME and 9x we do not want to request more than
		   it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
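
/*
 * Legacy-server sizing example for the loop above (illustrative numbers):
 * when the session lacks CAP_LARGE_FILES, each read is clamped to
 * maxBuf - 128, the 128 bytes being slack for the SMB header, so a server
 * advertising a 4356-byte maxBuf limits each wire read to 4228 bytes
 * regardless of the mount's rsize.
 */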
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
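
/*
 * Returning VM_FAULT_LOCKED from cifs_page_mkwrite() with the page still
 * locked is what keeps an mmap'ed page stable while writeback runs; the
 * fault path marks the page dirty and unlocks it after the handler returns.
 */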
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc)
			return rc;
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index,
					  GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, "Add page cache failed");
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}
		page_cache_release(page);

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		data += PAGE_CACHE_SIZE;

		/* add page to FS-Cache */
		cifs_readpage_to_fscache(mapping->host, page);
	}
	return;
}
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size we could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* read size needs to be a whole number of pages */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* the server copy of the file can be smaller
				   than the client's */
				/* BB do we need to verify this common case?
				   this case is ok - if we are at server EOF
				   we will hit it on the next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld. "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
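/*
 * Illustrative sketch, not part of the original file: cifs_readpages()
 * clamps each wire read to a whole number of pages that also fits within
 * the negotiated rsize. The hypothetical helper below restates that clamp;
 * masking rsize with PAGE_CACHE_MASK rounds the server limit down to a
 * page boundary, so a request never ends mid-page.
 */
static inline unsigned int
cifs_clamp_read_size_sketch(unsigned int contig_pages, unsigned int rsize)
{
	unsigned int read_size = contig_pages * PAGE_CACHE_SIZE;

	return min_t(unsigned int, read_size, rsize & PAGE_CACHE_MASK);
}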
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}
/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing refreshing the inode only on increases
 * in the file size, but this is tricky to do without racing with the
 * writebehind page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change the size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
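/*
 * Illustrative sketch, not part of the original file: a caller refreshing
 * attributes from the server would gate the size update roughly as below.
 * The function name and "server_eof" parameter are invented, and locking
 * around i_size_write() is elided; the real checks live in the cifs inode
 * revalidation paths.
 */
static inline void
cifs_maybe_update_size_sketch(struct cifsInodeInfo *cifsInode, __u64 server_eof)
{
	if (is_size_safe_to_change(cifsInode, server_eof))
		i_size_write(&cifsInode->vfs_inode, server_eof);
	/* else keep the client's view; writepage may be extending the file */
}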
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
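/*
 * Illustrative sketch, not part of the original file: cifs_write_begin()
 * skips the read-modify-write read when the oplocked page lies entirely at
 * or beyond EOF, or starts at offset 0 and the write reaches EOF, since no
 * existing data would survive. The hypothetical predicate below restates
 * that test in isolation.
 */
static inline bool
cifs_write_covers_page_sketch(loff_t page_start, loff_t offset, loff_t pos,
			      unsigned int len, loff_t i_size)
{
	return page_start >= i_size ||
	       (offset == 0 && (pos + len) >= i_size);
}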
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session using a now incorrect file handle is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected, since the server has already
	 * released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing a reference for us. Make sure it is done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}
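/*
 * Illustrative sketch, not part of the original file: the empty
 * lock/unlock pair above is a barrier idiom - taking and then dropping the
 * spinlock cannot complete until any holder still inside its critical
 * section (here, is_valid_oplock_break() taking its reference) has
 * finished. The hypothetical helper below names the idiom explicitly.
 */
static inline void cifs_wait_for_lock_holders_sketch(spinlock_t *lock)
{
	spin_lock(lock);	/* blocks until the current holder drops it */
	spin_unlock(lock);	/* nothing published; the wait was the point */
}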
/* must be called while holding cifs_file_list_lock */
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
	cifs_sb_active(cfile->dentry->d_sb);
	cifsFileInfo_get(cfile);
}

void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
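/*
 * Illustrative sketch, not part of the original file: inode setup
 * elsewhere in cifs is what selects between the two tables above, based on
 * whether the server's negotiated buffer can hold an SMB header plus one
 * full page of data. The exact comparison and the MAX_CIFS_HDR_SIZE
 * constant used here are assumptions for illustration only.
 */
static inline void cifs_set_aops_sketch(struct inode *inode,
					unsigned int server_max_buf)
{
	if (server_max_buf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
	else
		inode->i_data.a_ops = &cifs_addr_ops;
}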