1 /*
2 * fs/cifs/file.c
4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
36 #include "cifsfs.h"
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "fscache.h"
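/* map the O_ACCMODE bits of the open flags to the CIFS desired access
   mask requested from the server */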
45 static inline int cifs_convert_flags(unsigned int flags)
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request; it
53 can cause unnecessary access-denied errors on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
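/* translate POSIX open flags into the SMB_O_* flags used by the
   POSIX open/create call of the CIFS Unix extensions */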
63 static u32 cifs_posix_convert_flags(unsigned int flags)
65 u32 posix_flags = 0;
67 if ((flags & O_ACCMODE) == O_RDONLY)
68 posix_flags = SMB_O_RDONLY;
69 else if ((flags & O_ACCMODE) == O_WRONLY)
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
81 if (flags & O_DSYNC)
82 posix_flags |= SMB_O_SYNC;
83 if (flags & O_DIRECTORY)
84 posix_flags |= SMB_O_DIRECTORY;
85 if (flags & O_NOFOLLOW)
86 posix_flags |= SMB_O_NOFOLLOW;
87 if (flags & O_DIRECT)
88 posix_flags |= SMB_O_DIRECT;
90 return posix_flags;
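/* derive the CIFS create disposition from the O_CREAT/O_EXCL/O_TRUNC
   combination (see the mapping table in cifs_nt_open below) */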
93 static inline int cifs_get_disposition(unsigned int flags)
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
103 else
104 return FILE_OPEN;
107 int cifs_posix_open(char *full_path, struct inode **pinode,
108 struct super_block *sb, int mode, unsigned int f_flags,
109 __u32 *poplock, __u16 *pnetfid, int xid)
111 int rc;
112 FILE_UNIX_BASIC_INFO *presp_data;
113 __u32 posix_flags = 0;
114 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
115 struct cifs_fattr fattr;
116 struct tcon_link *tlink;
117 struct cifs_tcon *tcon;
119 cFYI(1, "posix open %s", full_path);
121 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
122 if (presp_data == NULL)
123 return -ENOMEM;
125 tlink = cifs_sb_tlink(cifs_sb);
126 if (IS_ERR(tlink)) {
127 rc = PTR_ERR(tlink);
128 goto posix_open_ret;
131 tcon = tlink_tcon(tlink);
132 mode &= ~current_umask();
134 posix_flags = cifs_posix_convert_flags(f_flags);
135 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
136 poplock, full_path, cifs_sb->local_nls,
137 cifs_sb->mnt_cifs_flags &
138 CIFS_MOUNT_MAP_SPECIAL_CHR);
139 cifs_put_tlink(tlink);
141 if (rc)
142 goto posix_open_ret;
144 if (presp_data->Type == cpu_to_le32(-1))
145 goto posix_open_ret; /* open ok, caller does qpathinfo */
147 if (!pinode)
148 goto posix_open_ret; /* caller does not need info */
150 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152 /* get new inode and set it up */
153 if (*pinode == NULL) {
154 cifs_fill_uniqueid(sb, &fattr);
155 *pinode = cifs_iget(sb, &fattr);
156 if (!*pinode) {
157 rc = -ENOMEM;
158 goto posix_open_ret;
160 } else {
161 cifs_fattr_to_inode(*pinode, &fattr);
164 posix_open_ret:
165 kfree(presp_data);
166 return rc;
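/* open via the regular (non-POSIX) SMB path: map the flags, issue the
   open on the wire and refresh the inode info afterwards */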
169 static int
170 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
171 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
172 __u16 *pnetfid, int xid)
174 int rc;
175 int desiredAccess;
176 int disposition;
177 FILE_ALL_INFO *buf;
179 desiredAccess = cifs_convert_flags(f_flags);
181 /*********************************************************************
182 * open flag mapping table:
184 * POSIX Flag CIFS Disposition
185 * ---------- ----------------
186 * O_CREAT FILE_OPEN_IF
187 * O_CREAT | O_EXCL FILE_CREATE
188 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
189 * O_TRUNC FILE_OVERWRITE
190 * none of the above FILE_OPEN
192 * Note that there is no direct match for the disposition
193 * FILE_SUPERSEDE (i.e. create whether or not the file exists);
194 * O_CREAT | O_TRUNC is similar but truncates the existing
195 * file rather than creating a new file as FILE_SUPERSEDE does
196 * (which uses the attributes / metadata passed in on the open call)
198 *? O_SYNC is a reasonable match to CIFS writethrough flag
199 *? and the read write flags match reasonably. O_LARGEFILE
200 *? is irrelevant because largefile support is always used
201 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
202 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
203 *********************************************************************/
205 disposition = cifs_get_disposition(f_flags);
207 /* BB pass O_SYNC flag through on file attributes .. BB */
209 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
210 if (!buf)
211 return -ENOMEM;
213 if (tcon->ses->capabilities & CAP_NT_SMBS)
214 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
215 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
216 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
217 & CIFS_MOUNT_MAP_SPECIAL_CHR);
218 else
219 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
220 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
224 if (rc)
225 goto out;
227 if (tcon->unix_ext)
228 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
229 xid);
230 else
231 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
232 xid, pnetfid);
234 out:
235 kfree(buf);
236 return rc;
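/* allocate and initialize the per-open cifsFileInfo, take references on
   the dentry and tlink, and link it into the tcon and inode open file
   lists */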
239 struct cifsFileInfo *
240 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
241 struct tcon_link *tlink, __u32 oplock)
243 struct dentry *dentry = file->f_path.dentry;
244 struct inode *inode = dentry->d_inode;
245 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
246 struct cifsFileInfo *pCifsFile;
248 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
249 if (pCifsFile == NULL)
250 return pCifsFile;
252 pCifsFile->count = 1;
253 pCifsFile->netfid = fileHandle;
254 pCifsFile->pid = current->tgid;
255 pCifsFile->uid = current_fsuid();
256 pCifsFile->dentry = dget(dentry);
257 pCifsFile->f_flags = file->f_flags;
258 pCifsFile->invalidHandle = false;
259 pCifsFile->tlink = cifs_get_tlink(tlink);
260 mutex_init(&pCifsFile->fh_mutex);
261 mutex_init(&pCifsFile->lock_mutex);
262 INIT_LIST_HEAD(&pCifsFile->llist);
263 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
265 spin_lock(&cifs_file_list_lock);
266 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
267 /* if readable file instance, put it first in the list */
268 if (file->f_mode & FMODE_READ)
269 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
270 else
271 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
272 spin_unlock(&cifs_file_list_lock);
274 cifs_set_oplock_level(pCifsInode, oplock);
276 file->private_data = pCifsFile;
277 return pCifsFile;
281 * Release a reference on the file private data. This may involve closing
282 * the filehandle out on the server. Must be called without holding
283 * cifs_file_list_lock.
285 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
287 struct inode *inode = cifs_file->dentry->d_inode;
288 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
289 struct cifsInodeInfo *cifsi = CIFS_I(inode);
290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
291 struct cifsLockInfo *li, *tmp;
293 spin_lock(&cifs_file_list_lock);
294 if (--cifs_file->count > 0) {
295 spin_unlock(&cifs_file_list_lock);
296 return;
299 /* remove it from the lists */
300 list_del(&cifs_file->flist);
301 list_del(&cifs_file->tlist);
303 if (list_empty(&cifsi->openFileList)) {
304 cFYI(1, "closing last open instance for inode %p",
305 cifs_file->dentry->d_inode);
307 /* in strict cache mode we need to invalidate the mapping on the last
308 close because it may cause an error when we open this file
309 again and get at least a level II oplock */
310 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
311 CIFS_I(inode)->invalid_mapping = true;
313 cifs_set_oplock_level(cifsi, 0);
315 spin_unlock(&cifs_file_list_lock);
317 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
318 int xid, rc;
320 xid = GetXid();
321 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
322 FreeXid(xid);
325 /* Delete any outstanding lock records. We'll lose them when the file
326 * is closed anyway.
328 mutex_lock(&cifs_file->lock_mutex);
329 list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
330 list_del(&li->llist);
331 kfree(li);
333 mutex_unlock(&cifs_file->lock_mutex);
335 cifs_put_tlink(cifs_file->tlink);
336 dput(cifs_file->dentry);
337 kfree(cifs_file);
340 int cifs_open(struct inode *inode, struct file *file)
342 int rc = -EACCES;
343 int xid;
344 __u32 oplock;
345 struct cifs_sb_info *cifs_sb;
346 struct cifs_tcon *tcon;
347 struct tcon_link *tlink;
348 struct cifsFileInfo *pCifsFile = NULL;
349 char *full_path = NULL;
350 bool posix_open_ok = false;
351 __u16 netfid;
353 xid = GetXid();
355 cifs_sb = CIFS_SB(inode->i_sb);
356 tlink = cifs_sb_tlink(cifs_sb);
357 if (IS_ERR(tlink)) {
358 FreeXid(xid);
359 return PTR_ERR(tlink);
361 tcon = tlink_tcon(tlink);
363 full_path = build_path_from_dentry(file->f_path.dentry);
364 if (full_path == NULL) {
365 rc = -ENOMEM;
366 goto out;
369 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
370 inode, file->f_flags, full_path);
372 if (oplockEnabled)
373 oplock = REQ_OPLOCK;
374 else
375 oplock = 0;
377 if (!tcon->broken_posix_open && tcon->unix_ext &&
378 (tcon->ses->capabilities & CAP_UNIX) &&
379 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
380 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
381 /* can not refresh inode info since size could be stale */
382 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
383 cifs_sb->mnt_file_mode /* ignored */,
384 file->f_flags, &oplock, &netfid, xid);
385 if (rc == 0) {
386 cFYI(1, "posix open succeeded");
387 posix_open_ok = true;
388 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
389 if (tcon->ses->serverNOS)
390 cERROR(1, "server %s of type %s returned"
391 " unexpected error on SMB posix open"
392 ", disabling posix open support."
393 " Check if server update available.",
394 tcon->ses->serverName,
395 tcon->ses->serverNOS);
396 tcon->broken_posix_open = true;
397 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
398 (rc != -EOPNOTSUPP)) /* path not found or net err */
399 goto out;
400 /* else fallthrough to retry open the old way on network i/o
401 or DFS errors */
404 if (!posix_open_ok) {
405 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
406 file->f_flags, &oplock, &netfid, xid);
407 if (rc)
408 goto out;
411 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
412 if (pCifsFile == NULL) {
413 CIFSSMBClose(xid, tcon, netfid);
414 rc = -ENOMEM;
415 goto out;
418 cifs_fscache_set_inode_cookie(inode, file);
420 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
421 /* time to set the mode which we could not set earlier due to
422 problems creating new read-only files */
423 struct cifs_unix_set_info_args args = {
424 .mode = inode->i_mode,
425 .uid = NO_CHANGE_64,
426 .gid = NO_CHANGE_64,
427 .ctime = NO_CHANGE_64,
428 .atime = NO_CHANGE_64,
429 .mtime = NO_CHANGE_64,
430 .device = 0,
432 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
433 pCifsFile->pid);
436 out:
437 kfree(full_path);
438 FreeXid(xid);
439 cifs_put_tlink(tlink);
440 return rc;
443 /* Try to reacquire byte range locks that were released when session */
444 /* to server was lost */
445 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
447 int rc = 0;
449 /* BB list all locks open on this file and relock */
451 return rc;
454 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
456 int rc = -EACCES;
457 int xid;
458 __u32 oplock;
459 struct cifs_sb_info *cifs_sb;
460 struct cifs_tcon *tcon;
461 struct cifsInodeInfo *pCifsInode;
462 struct inode *inode;
463 char *full_path = NULL;
464 int desiredAccess;
465 int disposition = FILE_OPEN;
466 __u16 netfid;
468 xid = GetXid();
469 mutex_lock(&pCifsFile->fh_mutex);
470 if (!pCifsFile->invalidHandle) {
471 mutex_unlock(&pCifsFile->fh_mutex);
472 rc = 0;
473 FreeXid(xid);
474 return rc;
477 inode = pCifsFile->dentry->d_inode;
478 cifs_sb = CIFS_SB(inode->i_sb);
479 tcon = tlink_tcon(pCifsFile->tlink);
481 /* cannot grab the rename sem here because various ops, including
482 those that already hold the rename sem, can end up causing writepage
483 to get called, and if the server was down that means we end up here
484 and can never tell if the caller already holds the rename_sem */
485 full_path = build_path_from_dentry(pCifsFile->dentry);
486 if (full_path == NULL) {
487 rc = -ENOMEM;
488 mutex_unlock(&pCifsFile->fh_mutex);
489 FreeXid(xid);
490 return rc;
493 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
494 inode, pCifsFile->f_flags, full_path);
496 if (oplockEnabled)
497 oplock = REQ_OPLOCK;
498 else
499 oplock = 0;
501 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
502 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
503 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
506 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
507 * original open. Must mask them off for a reopen.
509 unsigned int oflags = pCifsFile->f_flags &
510 ~(O_CREAT | O_EXCL | O_TRUNC);
512 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
513 cifs_sb->mnt_file_mode /* ignored */,
514 oflags, &oplock, &netfid, xid);
515 if (rc == 0) {
516 cFYI(1, "posix reopen succeeded");
517 goto reopen_success;
519 /* fallthrough to retry open the old way on errors, especially
520 in the reconnect path it is important to retry hard */
523 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
525 /* Can not refresh inode by passing in file_info buf to be returned
526 by SMBOpen and then calling get_inode_info with returned buf
527 since file might have write behind data that needs to be flushed
528 and server version of file size can be stale. If we knew for sure
529 that inode was not dirty locally we could do this */
531 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
532 CREATE_NOT_DIR, &netfid, &oplock, NULL,
533 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
534 CIFS_MOUNT_MAP_SPECIAL_CHR);
535 if (rc) {
536 mutex_unlock(&pCifsFile->fh_mutex);
537 cFYI(1, "cifs_open returned 0x%x", rc);
538 cFYI(1, "oplock: %d", oplock);
539 goto reopen_error_exit;
542 reopen_success:
543 pCifsFile->netfid = netfid;
544 pCifsFile->invalidHandle = false;
545 mutex_unlock(&pCifsFile->fh_mutex);
546 pCifsInode = CIFS_I(inode);
548 if (can_flush) {
549 rc = filemap_write_and_wait(inode->i_mapping);
550 mapping_set_error(inode->i_mapping, rc);
552 if (tcon->unix_ext)
553 rc = cifs_get_inode_info_unix(&inode,
554 full_path, inode->i_sb, xid);
555 else
556 rc = cifs_get_inode_info(&inode,
557 full_path, NULL, inode->i_sb,
558 xid, NULL);
559 } /* else we are writing out data to server already
560 and could deadlock if we tried to flush data, and
561 since we do not know if we have data that would
562 invalidate the current end of file on the server
563 we can not go to the server to get the new inode
564 info */
566 cifs_set_oplock_level(pCifsInode, oplock);
568 cifs_relock_file(pCifsFile);
570 reopen_error_exit:
571 kfree(full_path);
572 FreeXid(xid);
573 return rc;
576 int cifs_close(struct inode *inode, struct file *file)
578 if (file->private_data != NULL) {
579 cifsFileInfo_put(file->private_data);
580 file->private_data = NULL;
583 /* return code from the ->release op is always ignored */
584 return 0;
587 int cifs_closedir(struct inode *inode, struct file *file)
589 int rc = 0;
590 int xid;
591 struct cifsFileInfo *pCFileStruct = file->private_data;
592 char *ptmp;
594 cFYI(1, "Closedir inode = 0x%p", inode);
596 xid = GetXid();
598 if (pCFileStruct) {
599 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
601 cFYI(1, "Freeing private data in close dir");
602 spin_lock(&cifs_file_list_lock);
603 if (!pCFileStruct->srch_inf.endOfSearch &&
604 !pCFileStruct->invalidHandle) {
605 pCFileStruct->invalidHandle = true;
606 spin_unlock(&cifs_file_list_lock);
607 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
608 cFYI(1, "Closing uncompleted readdir with rc %d",
609 rc);
610 /* not much we can do if it fails anyway, ignore rc */
611 rc = 0;
612 } else
613 spin_unlock(&cifs_file_list_lock);
614 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
615 if (ptmp) {
616 cFYI(1, "closedir free smb buf in srch struct");
617 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
618 if (pCFileStruct->srch_inf.smallBuf)
619 cifs_small_buf_release(ptmp);
620 else
621 cifs_buf_release(ptmp);
623 cifs_put_tlink(pCFileStruct->tlink);
624 kfree(file->private_data);
625 file->private_data = NULL;
627 /* BB can we lock the filestruct while this is going on? */
628 FreeXid(xid);
629 return rc;
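/* record a byte-range lock locally so the unlock path can find it and
   so it can be freed on close; llist is protected by lock_mutex */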
632 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
633 __u64 offset, __u8 lockType)
635 struct cifsLockInfo *li =
636 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
637 if (li == NULL)
638 return -ENOMEM;
639 li->offset = offset;
640 li->length = len;
641 li->type = lockType;
642 mutex_lock(&fid->lock_mutex);
643 list_add(&li->llist, &fid->llist);
644 mutex_unlock(&fid->lock_mutex);
645 return 0;
648 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
650 int rc, xid;
651 __u32 numLock = 0;
652 __u32 numUnlock = 0;
653 __u64 length;
654 bool wait_flag = false;
655 struct cifs_sb_info *cifs_sb;
656 struct cifs_tcon *tcon;
657 __u16 netfid;
658 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
659 bool posix_locking = 0;
661 length = 1 + pfLock->fl_end - pfLock->fl_start;
662 rc = -EACCES;
663 xid = GetXid();
665 cFYI(1, "Lock parm: 0x%x flockflags: "
666 "0x%x flocktype: 0x%x start: %lld end: %lld",
667 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
668 pfLock->fl_end);
670 if (pfLock->fl_flags & FL_POSIX)
671 cFYI(1, "Posix");
672 if (pfLock->fl_flags & FL_FLOCK)
673 cFYI(1, "Flock");
674 if (pfLock->fl_flags & FL_SLEEP) {
675 cFYI(1, "Blocking lock");
676 wait_flag = true;
678 if (pfLock->fl_flags & FL_ACCESS)
679 cFYI(1, "Process suspended by mandatory locking - "
680 "not implemented yet");
681 if (pfLock->fl_flags & FL_LEASE)
682 cFYI(1, "Lease on file - not implemented yet");
683 if (pfLock->fl_flags &
684 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
685 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
687 if (pfLock->fl_type == F_WRLCK) {
688 cFYI(1, "F_WRLCK ");
689 numLock = 1;
690 } else if (pfLock->fl_type == F_UNLCK) {
691 cFYI(1, "F_UNLCK");
692 numUnlock = 1;
693 /* Check if unlock includes more than
694 one lock range */
695 } else if (pfLock->fl_type == F_RDLCK) {
696 cFYI(1, "F_RDLCK");
697 lockType |= LOCKING_ANDX_SHARED_LOCK;
698 numLock = 1;
699 } else if (pfLock->fl_type == F_EXLCK) {
700 cFYI(1, "F_EXLCK");
701 numLock = 1;
702 } else if (pfLock->fl_type == F_SHLCK) {
703 cFYI(1, "F_SHLCK");
704 lockType |= LOCKING_ANDX_SHARED_LOCK;
705 numLock = 1;
706 } else
707 cFYI(1, "Unknown type of lock");
709 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
710 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
711 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
713 if ((tcon->ses->capabilities & CAP_UNIX) &&
714 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
715 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
716 posix_locking = 1;
717 /* BB add code here to normalize offset and length to
718 account for negative length which we can not accept over the
719 wire */
720 if (IS_GETLK(cmd)) {
721 if (posix_locking) {
722 int posix_lock_type;
723 if (lockType & LOCKING_ANDX_SHARED_LOCK)
724 posix_lock_type = CIFS_RDLCK;
725 else
726 posix_lock_type = CIFS_WRLCK;
727 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
728 length, pfLock, posix_lock_type,
729 wait_flag);
730 FreeXid(xid);
731 return rc;
734 /* BB we could chain these into one lock request BB */
735 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
736 0, 1, lockType, 0 /* wait flag */, 0);
737 if (rc == 0) {
738 rc = CIFSSMBLock(xid, tcon, netfid, length,
739 pfLock->fl_start, 1 /* numUnlock */ ,
740 0 /* numLock */ , lockType,
741 0 /* wait flag */, 0);
742 pfLock->fl_type = F_UNLCK;
743 if (rc != 0)
744 cERROR(1, "Error unlocking previously locked "
745 "range %d during test of lock", rc);
746 rc = 0;
748 } else {
749 /* if rc == ERR_SHARING_VIOLATION ? */
750 rc = 0;
752 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
753 pfLock->fl_type = F_WRLCK;
754 } else {
755 rc = CIFSSMBLock(xid, tcon, netfid, length,
756 pfLock->fl_start, 0, 1,
757 lockType | LOCKING_ANDX_SHARED_LOCK,
758 0 /* wait flag */, 0);
759 if (rc == 0) {
760 rc = CIFSSMBLock(xid, tcon, netfid,
761 length, pfLock->fl_start, 1, 0,
762 lockType |
763 LOCKING_ANDX_SHARED_LOCK,
764 0 /* wait flag */, 0);
765 pfLock->fl_type = F_RDLCK;
766 if (rc != 0)
767 cERROR(1, "Error unlocking "
768 "previously locked range %d "
769 "during test of lock", rc);
770 rc = 0;
771 } else {
772 pfLock->fl_type = F_WRLCK;
773 rc = 0;
778 FreeXid(xid);
779 return rc;
782 if (!numLock && !numUnlock) {
783 /* if no lock or unlock then nothing
784 to do since we do not know what it is */
785 FreeXid(xid);
786 return -EOPNOTSUPP;
789 if (posix_locking) {
790 int posix_lock_type;
791 if (lockType & LOCKING_ANDX_SHARED_LOCK)
792 posix_lock_type = CIFS_RDLCK;
793 else
794 posix_lock_type = CIFS_WRLCK;
796 if (numUnlock == 1)
797 posix_lock_type = CIFS_UNLCK;
799 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
800 length, pfLock, posix_lock_type,
801 wait_flag);
802 } else {
803 struct cifsFileInfo *fid = file->private_data;
805 if (numLock) {
806 rc = CIFSSMBLock(xid, tcon, netfid, length,
807 pfLock->fl_start, 0, numLock, lockType,
808 wait_flag, 0);
810 if (rc == 0) {
811 /* For Windows locks we must store them. */
812 rc = store_file_lock(fid, length,
813 pfLock->fl_start, lockType);
815 } else if (numUnlock) {
816 /* For each stored lock that this unlock overlaps
817 completely, unlock it. */
818 int stored_rc = 0;
819 struct cifsLockInfo *li, *tmp;
821 rc = 0;
822 mutex_lock(&fid->lock_mutex);
823 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
824 if (pfLock->fl_start <= li->offset &&
825 (pfLock->fl_start + length) >=
826 (li->offset + li->length)) {
827 stored_rc = CIFSSMBLock(xid, tcon,
828 netfid, li->length,
829 li->offset, 1, 0,
830 li->type, false, 0);
831 if (stored_rc)
832 rc = stored_rc;
833 else {
834 list_del(&li->llist);
835 kfree(li);
839 mutex_unlock(&fid->lock_mutex);
843 if (pfLock->fl_flags & FL_POSIX)
844 posix_lock_file_wait(file, pfLock);
845 FreeXid(xid);
846 return rc;
849 /* update the file size (if needed) after a write */
850 void
851 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
852 unsigned int bytes_written)
854 loff_t end_of_write = offset + bytes_written;
856 if (end_of_write > cifsi->server_eof)
857 cifsi->server_eof = end_of_write;
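/* write write_size bytes at *poffset through the given open file in
   wsize-limited chunks, reopening an invalidated handle as needed;
   returns the number of bytes written and advances *poffset */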
860 static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
861 const char *write_data, size_t write_size,
862 loff_t *poffset)
864 int rc = 0;
865 unsigned int bytes_written = 0;
866 unsigned int total_written;
867 struct cifs_sb_info *cifs_sb;
868 struct cifs_tcon *pTcon;
869 int xid;
870 struct dentry *dentry = open_file->dentry;
871 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
872 struct cifs_io_parms io_parms;
874 cifs_sb = CIFS_SB(dentry->d_sb);
876 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
877 *poffset, dentry->d_name.name);
879 pTcon = tlink_tcon(open_file->tlink);
881 xid = GetXid();
883 for (total_written = 0; write_size > total_written;
884 total_written += bytes_written) {
885 rc = -EAGAIN;
886 while (rc == -EAGAIN) {
887 struct kvec iov[2];
888 unsigned int len;
890 if (open_file->invalidHandle) {
891 /* we could deadlock if we called
892 filemap_fdatawait from here so tell
893 reopen_file not to flush data to
894 server now */
895 rc = cifs_reopen_file(open_file, false);
896 if (rc != 0)
897 break;
900 len = min((size_t)cifs_sb->wsize,
901 write_size - total_written);
902 /* iov[0] is reserved for smb header */
903 iov[1].iov_base = (char *)write_data + total_written;
904 iov[1].iov_len = len;
905 io_parms.netfid = open_file->netfid;
906 io_parms.pid = pid;
907 io_parms.tcon = pTcon;
908 io_parms.offset = *poffset;
909 io_parms.length = len;
910 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
911 1, 0);
913 if (rc || (bytes_written == 0)) {
914 if (total_written)
915 break;
916 else {
917 FreeXid(xid);
918 return rc;
920 } else {
921 cifs_update_eof(cifsi, *poffset, bytes_written);
922 *poffset += bytes_written;
926 cifs_stats_bytes_written(pTcon, total_written);
928 if (total_written > 0) {
929 spin_lock(&dentry->d_inode->i_lock);
930 if (*poffset > dentry->d_inode->i_size)
931 i_size_write(dentry->d_inode, *poffset);
932 spin_unlock(&dentry->d_inode->i_lock);
934 mark_inode_dirty_sync(dentry->d_inode);
935 FreeXid(xid);
936 return total_written;
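/* find an open instance of this inode with a valid, readable handle and
   take a reference on it; returns NULL if none is available */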
939 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
940 bool fsuid_only)
942 struct cifsFileInfo *open_file = NULL;
943 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
945 /* only filter by fsuid on multiuser mounts */
946 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
947 fsuid_only = false;
949 spin_lock(&cifs_file_list_lock);
950 /* we could simply get the first list entry since write-only entries
951 are always at the end of the list, but the first entry might
952 have a close pending, so we go through the whole list */
953 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
954 if (fsuid_only && open_file->uid != current_fsuid())
955 continue;
956 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
957 if (!open_file->invalidHandle) {
958 /* found a good file */
959 /* lock it so it will not be closed on us */
960 cifsFileInfo_get(open_file);
961 spin_unlock(&cifs_file_list_lock);
962 return open_file;
963 } /* else might as well continue, and look for
964 another, or simply have the caller reopen it
965 again rather than trying to fix this handle */
966 } else /* write only file */
967 break; /* write only files are last so must be done */
969 spin_unlock(&cifs_file_list_lock);
970 return NULL;
973 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
974 bool fsuid_only)
976 struct cifsFileInfo *open_file, *inv_file = NULL;
977 struct cifs_sb_info *cifs_sb;
978 bool any_available = false;
979 int rc;
980 unsigned int refind = 0;
982 /* Having a null inode here (because mapping->host was set to zero by
983 the VFS or MM) should not happen but we had reports of an oops (due to
984 it being zero) during stress testcases so we need to check for it */
986 if (cifs_inode == NULL) {
987 cERROR(1, "Null inode passed to cifs_writeable_file");
988 dump_stack();
989 return NULL;
992 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
994 /* only filter by fsuid on multiuser mounts */
995 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
996 fsuid_only = false;
998 spin_lock(&cifs_file_list_lock);
999 refind_writable:
1000 if (refind > MAX_REOPEN_ATT) {
1001 spin_unlock(&cifs_file_list_lock);
1002 return NULL;
1004 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1005 if (!any_available && open_file->pid != current->tgid)
1006 continue;
1007 if (fsuid_only && open_file->uid != current_fsuid())
1008 continue;
1009 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1010 if (!open_file->invalidHandle) {
1011 /* found a good writable file */
1012 cifsFileInfo_get(open_file);
1013 spin_unlock(&cifs_file_list_lock);
1014 return open_file;
1015 } else {
1016 if (!inv_file)
1017 inv_file = open_file;
1021 /* couldn't find a usable FH with the same pid, try any available */
1022 if (!any_available) {
1023 any_available = true;
1024 goto refind_writable;
1027 if (inv_file) {
1028 any_available = false;
1029 cifsFileInfo_get(inv_file);
1032 spin_unlock(&cifs_file_list_lock);
1034 if (inv_file) {
1035 rc = cifs_reopen_file(inv_file, false);
1036 if (!rc)
1037 return inv_file;
1038 else {
1039 spin_lock(&cifs_file_list_lock);
1040 list_move_tail(&inv_file->flist,
1041 &cifs_inode->openFileList);
1042 spin_unlock(&cifs_file_list_lock);
1043 cifsFileInfo_put(inv_file);
1044 spin_lock(&cifs_file_list_lock);
1045 ++refind;
1046 goto refind_writable;
1050 return NULL;
1053 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1055 struct address_space *mapping = page->mapping;
1056 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1057 char *write_data;
1058 int rc = -EFAULT;
1059 int bytes_written = 0;
1060 struct inode *inode;
1061 struct cifsFileInfo *open_file;
1063 if (!mapping || !mapping->host)
1064 return -EFAULT;
1066 inode = page->mapping->host;
1068 offset += (loff_t)from;
1069 write_data = kmap(page);
1070 write_data += from;
1072 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1073 kunmap(page);
1074 return -EIO;
1077 /* racing with truncate? */
1078 if (offset > mapping->host->i_size) {
1079 kunmap(page);
1080 return 0; /* don't care */
1083 /* check to make sure that we are not extending the file */
1084 if (mapping->host->i_size - offset < (loff_t)to)
1085 to = (unsigned)(mapping->host->i_size - offset);
1087 open_file = find_writable_file(CIFS_I(mapping->host), false);
1088 if (open_file) {
1089 bytes_written = cifs_write(open_file, open_file->pid,
1090 write_data, to - from, &offset);
1091 cifsFileInfo_put(open_file);
1092 /* Does mm or vfs already set times? */
1093 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1094 if ((bytes_written > 0) && (offset))
1095 rc = 0;
1096 else if (bytes_written < 0)
1097 rc = bytes_written;
1098 } else {
1099 cFYI(1, "No writeable filehandles for inode");
1100 rc = -EIO;
1103 kunmap(page);
1104 return rc;
1107 static int cifs_writepages(struct address_space *mapping,
1108 struct writeback_control *wbc)
1110 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1111 bool done = false, scanned = false, range_whole = false;
1112 pgoff_t end, index;
1113 struct cifs_writedata *wdata;
1114 struct page *page;
1115 int rc = 0;
1118 * If wsize is smaller than the page cache size, default to writing
1119 * one page at a time via cifs_writepage
1121 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1122 return generic_writepages(mapping, wbc);
1124 if (wbc->range_cyclic) {
1125 index = mapping->writeback_index; /* Start from prev offset */
1126 end = -1;
1127 } else {
1128 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1129 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1130 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1131 range_whole = true;
1132 scanned = true;
1134 retry:
1135 while (!done && index <= end) {
1136 unsigned int i, nr_pages, found_pages;
1137 pgoff_t next = 0, tofind;
1138 struct page **pages;
1140 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1141 end - index) + 1;
1143 wdata = cifs_writedata_alloc((unsigned int)tofind);
1144 if (!wdata) {
1145 rc = -ENOMEM;
1146 break;
1150 * find_get_pages_tag seems to return a max of 256 on each
1151 * iteration, so we must call it several times in order to
1152 * fill the array or the wsize is effectively limited to
1153 * 256 * PAGE_CACHE_SIZE.
1155 found_pages = 0;
1156 pages = wdata->pages;
1157 do {
1158 nr_pages = find_get_pages_tag(mapping, &index,
1159 PAGECACHE_TAG_DIRTY,
1160 tofind, pages);
1161 found_pages += nr_pages;
1162 tofind -= nr_pages;
1163 pages += nr_pages;
1164 } while (nr_pages && tofind && index <= end);
1166 if (found_pages == 0) {
1167 kref_put(&wdata->refcount, cifs_writedata_release);
1168 break;
1171 nr_pages = 0;
1172 for (i = 0; i < found_pages; i++) {
1173 page = wdata->pages[i];
1175 * At this point we hold neither mapping->tree_lock nor
1176 * lock on the page itself: the page may be truncated or
1177 * invalidated (changing page->mapping to NULL), or even
1178 * swizzled back from swapper_space to tmpfs file
1179 * mapping
1182 if (nr_pages == 0)
1183 lock_page(page);
1184 else if (!trylock_page(page))
1185 break;
1187 if (unlikely(page->mapping != mapping)) {
1188 unlock_page(page);
1189 break;
1192 if (!wbc->range_cyclic && page->index > end) {
1193 done = true;
1194 unlock_page(page);
1195 break;
1198 if (next && (page->index != next)) {
1199 /* Not next consecutive page */
1200 unlock_page(page);
1201 break;
1204 if (wbc->sync_mode != WB_SYNC_NONE)
1205 wait_on_page_writeback(page);
1207 if (PageWriteback(page) ||
1208 !clear_page_dirty_for_io(page)) {
1209 unlock_page(page);
1210 break;
1214 * This actually clears the dirty bit in the radix tree.
1215 * See cifs_writepage() for more commentary.
1217 set_page_writeback(page);
1219 if (page_offset(page) >= mapping->host->i_size) {
1220 done = true;
1221 unlock_page(page);
1222 end_page_writeback(page);
1223 break;
1226 wdata->pages[i] = page;
1227 next = page->index + 1;
1228 ++nr_pages;
1231 /* reset index to refind any pages skipped */
1232 if (nr_pages == 0)
1233 index = wdata->pages[0]->index + 1;
1235 /* put any pages we aren't going to use */
1236 for (i = nr_pages; i < found_pages; i++) {
1237 page_cache_release(wdata->pages[i]);
1238 wdata->pages[i] = NULL;
1241 /* nothing to write? */
1242 if (nr_pages == 0) {
1243 kref_put(&wdata->refcount, cifs_writedata_release);
1244 continue;
1247 wdata->sync_mode = wbc->sync_mode;
1248 wdata->nr_pages = nr_pages;
1249 wdata->offset = page_offset(wdata->pages[0]);
1251 do {
1252 if (wdata->cfile != NULL)
1253 cifsFileInfo_put(wdata->cfile);
1254 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1255 false);
1256 if (!wdata->cfile) {
1257 cERROR(1, "No writable handles for inode");
1258 rc = -EBADF;
1259 break;
1261 rc = cifs_async_writev(wdata);
1262 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1264 for (i = 0; i < nr_pages; ++i)
1265 unlock_page(wdata->pages[i]);
1267 /* send failure -- clean up the mess */
1268 if (rc != 0) {
1269 for (i = 0; i < nr_pages; ++i) {
1270 if (rc == -EAGAIN)
1271 redirty_page_for_writepage(wbc,
1272 wdata->pages[i]);
1273 else
1274 SetPageError(wdata->pages[i]);
1275 end_page_writeback(wdata->pages[i]);
1276 page_cache_release(wdata->pages[i]);
1278 if (rc != -EAGAIN)
1279 mapping_set_error(mapping, rc);
1281 kref_put(&wdata->refcount, cifs_writedata_release);
1283 wbc->nr_to_write -= nr_pages;
1284 if (wbc->nr_to_write <= 0)
1285 done = true;
1287 index = next;
1290 if (!scanned && !done) {
1292 * We hit the last page and there is more work to be done: wrap
1293 * back to the start of the file
1295 scanned = true;
1296 index = 0;
1297 goto retry;
1300 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1301 mapping->writeback_index = index;
1303 return rc;
1306 static int
1307 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1309 int rc;
1310 int xid;
1312 xid = GetXid();
1313 /* BB add check for wbc flags */
1314 page_cache_get(page);
1315 if (!PageUptodate(page))
1316 cFYI(1, "ppw - page not up to date");
1319 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1321 * A writepage() implementation always needs to do either this,
1322 * or re-dirty the page with "redirty_page_for_writepage()" in
1323 * the case of a failure.
1325 * Just unlocking the page will cause the radix tree tag-bits
1326 * to fail to update with the state of the page correctly.
1328 set_page_writeback(page);
1329 retry_write:
1330 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1331 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1332 goto retry_write;
1333 else if (rc == -EAGAIN)
1334 redirty_page_for_writepage(wbc, page);
1335 else if (rc != 0)
1336 SetPageError(page);
1337 else
1338 SetPageUptodate(page);
1339 end_page_writeback(page);
1340 page_cache_release(page);
1341 FreeXid(xid);
1342 return rc;
1345 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1347 int rc = cifs_writepage_locked(page, wbc);
1348 unlock_page(page);
1349 return rc;
1352 static int cifs_write_end(struct file *file, struct address_space *mapping,
1353 loff_t pos, unsigned len, unsigned copied,
1354 struct page *page, void *fsdata)
1356 int rc;
1357 struct inode *inode = mapping->host;
1358 struct cifsFileInfo *cfile = file->private_data;
1359 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1360 __u32 pid;
1362 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1363 pid = cfile->pid;
1364 else
1365 pid = current->tgid;
1367 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1368 page, pos, copied);
1370 if (PageChecked(page)) {
1371 if (copied == len)
1372 SetPageUptodate(page);
1373 ClearPageChecked(page);
1374 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1375 SetPageUptodate(page);
1377 if (!PageUptodate(page)) {
1378 char *page_data;
1379 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1380 int xid;
1382 xid = GetXid();
1383 /* this is probably better than directly calling
1384 partialpage_write since in this function the file handle is
1385 known, which we might as well leverage */
1386 /* BB check if anything else missing out of ppw
1387 such as updating last write time */
1388 page_data = kmap(page);
1389 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
1390 /* if (rc < 0) should we set writebehind rc? */
1391 kunmap(page);
1393 FreeXid(xid);
1394 } else {
1395 rc = copied;
1396 pos += copied;
1397 set_page_dirty(page);
1400 if (rc > 0) {
1401 spin_lock(&inode->i_lock);
1402 if (pos > inode->i_size)
1403 i_size_write(inode, pos);
1404 spin_unlock(&inode->i_lock);
1407 unlock_page(page);
1408 page_cache_release(page);
1410 return rc;
1413 int cifs_strict_fsync(struct file *file, int datasync)
1415 int xid;
1416 int rc = 0;
1417 struct cifs_tcon *tcon;
1418 struct cifsFileInfo *smbfile = file->private_data;
1419 struct inode *inode = file->f_path.dentry->d_inode;
1420 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1422 xid = GetXid();
1424 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1425 file->f_path.dentry->d_name.name, datasync);
1427 if (!CIFS_I(inode)->clientCanCacheRead) {
1428 rc = cifs_invalidate_mapping(inode);
1429 if (rc) {
1430 cFYI(1, "rc: %d during invalidate phase", rc);
1431 rc = 0; /* don't care about it in fsync */
1435 tcon = tlink_tcon(smbfile->tlink);
1436 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1437 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1439 FreeXid(xid);
1440 return rc;
1443 int cifs_fsync(struct file *file, int datasync)
1445 int xid;
1446 int rc = 0;
1447 struct cifs_tcon *tcon;
1448 struct cifsFileInfo *smbfile = file->private_data;
1449 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1451 xid = GetXid();
1453 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1454 file->f_path.dentry->d_name.name, datasync);
1456 tcon = tlink_tcon(smbfile->tlink);
1457 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1458 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1460 FreeXid(xid);
1461 return rc;
1465 * As the file closes, flush all cached write data for this inode, checking
1466 * for write-behind errors.
1468 int cifs_flush(struct file *file, fl_owner_t id)
1470 struct inode *inode = file->f_path.dentry->d_inode;
1471 int rc = 0;
1473 if (file->f_mode & FMODE_WRITE)
1474 rc = filemap_write_and_wait(inode->i_mapping);
1476 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1478 return rc;
1481 static int
1482 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1484 int rc = 0;
1485 unsigned long i;
1487 for (i = 0; i < num_pages; i++) {
1488 pages[i] = alloc_page(__GFP_HIGHMEM);
1489 if (!pages[i]) {
1491 * save number of pages we have already allocated and
1492 * return with ENOMEM error
1494 num_pages = i;
1495 rc = -ENOMEM;
1496 goto error;
1500 return rc;
1502 error:
1503 for (i = 0; i < num_pages; i++)
1504 put_page(pages[i]);
1505 return rc;
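/* return the number of pages needed for the next chunk of at most wsize
   bytes, optionally returning the chunk length in cur_len */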
1508 static inline
1509 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1511 size_t num_pages;
1512 size_t clen;
1514 clen = min_t(const size_t, len, wsize);
1515 num_pages = clen / PAGE_CACHE_SIZE;
1516 if (clen % PAGE_CACHE_SIZE)
1517 num_pages++;
1519 if (cur_len)
1520 *cur_len = clen;
1522 return num_pages;
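/* uncached write path: copy the user iovec into temporary pages and
   send them to the server in wsize-sized SMB writes */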
1525 static ssize_t
1526 cifs_iovec_write(struct file *file, const struct iovec *iov,
1527 unsigned long nr_segs, loff_t *poffset)
1529 unsigned int written;
1530 unsigned long num_pages, npages, i;
1531 size_t copied, len, cur_len;
1532 ssize_t total_written = 0;
1533 struct kvec *to_send;
1534 struct page **pages;
1535 struct iov_iter it;
1536 struct inode *inode;
1537 struct cifsFileInfo *open_file;
1538 struct cifs_tcon *pTcon;
1539 struct cifs_sb_info *cifs_sb;
1540 struct cifs_io_parms io_parms;
1541 int xid, rc;
1542 __u32 pid;
1544 len = iov_length(iov, nr_segs);
1545 if (!len)
1546 return 0;
1548 rc = generic_write_checks(file, poffset, &len, 0);
1549 if (rc)
1550 return rc;
1552 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1553 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1555 pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
1556 if (!pages)
1557 return -ENOMEM;
1559 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1560 if (!to_send) {
1561 kfree(pages);
1562 return -ENOMEM;
1565 rc = cifs_write_allocate_pages(pages, num_pages);
1566 if (rc) {
1567 kfree(pages);
1568 kfree(to_send);
1569 return rc;
1572 xid = GetXid();
1573 open_file = file->private_data;
1575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1576 pid = open_file->pid;
1577 else
1578 pid = current->tgid;
1580 pTcon = tlink_tcon(open_file->tlink);
1581 inode = file->f_path.dentry->d_inode;
1583 iov_iter_init(&it, iov, nr_segs, len, 0);
1584 npages = num_pages;
1586 do {
1587 size_t save_len = cur_len;
1588 for (i = 0; i < npages; i++) {
1589 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1590 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1591 copied);
1592 cur_len -= copied;
1593 iov_iter_advance(&it, copied);
1594 to_send[i+1].iov_base = kmap(pages[i]);
1595 to_send[i+1].iov_len = copied;
1598 cur_len = save_len - cur_len;
1600 do {
1601 if (open_file->invalidHandle) {
1602 rc = cifs_reopen_file(open_file, false);
1603 if (rc != 0)
1604 break;
1606 io_parms.netfid = open_file->netfid;
1607 io_parms.pid = pid;
1608 io_parms.tcon = pTcon;
1609 io_parms.offset = *poffset;
1610 io_parms.length = cur_len;
1611 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1612 npages, 0);
1613 } while (rc == -EAGAIN);
1615 for (i = 0; i < npages; i++)
1616 kunmap(pages[i]);
1618 if (written) {
1619 len -= written;
1620 total_written += written;
1621 cifs_update_eof(CIFS_I(inode), *poffset, written);
1622 *poffset += written;
1623 } else if (rc < 0) {
1624 if (!total_written)
1625 total_written = rc;
1626 break;
1629 /* get length and number of kvecs of the next write */
1630 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1631 } while (len > 0);
1633 if (total_written > 0) {
1634 spin_lock(&inode->i_lock);
1635 if (*poffset > inode->i_size)
1636 i_size_write(inode, *poffset);
1637 spin_unlock(&inode->i_lock);
1640 cifs_stats_bytes_written(pTcon, total_written);
1641 mark_inode_dirty_sync(inode);
1643 for (i = 0; i < num_pages; i++)
1644 put_page(pages[i]);
1645 kfree(to_send);
1646 kfree(pages);
1647 FreeXid(xid);
1648 return total_written;
1651 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
1652 unsigned long nr_segs, loff_t pos)
1654 ssize_t written;
1655 struct inode *inode;
1657 inode = iocb->ki_filp->f_path.dentry->d_inode;
1660 * BB - optimize for the case when signing is disabled: we can drop this
1661 * extra memory-to-memory copying and use iovec buffers for constructing
1662 * the write request.
1665 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1666 if (written > 0) {
1667 CIFS_I(inode)->invalid_mapping = true;
1668 iocb->ki_pos = pos;
1671 return written;
1674 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1675 unsigned long nr_segs, loff_t pos)
1677 struct inode *inode;
1679 inode = iocb->ki_filp->f_path.dentry->d_inode;
1681 if (CIFS_I(inode)->clientCanCacheAll)
1682 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1685 * In strict cache mode we need to write the data to the server exactly
1686 * from pos to pos+len-1 rather than flush all affected pages,
1687 * because that may cause an error with mandatory locks on these pages but
1688 * not on the region from pos to pos+len-1.
1691 return cifs_user_writev(iocb, iov, nr_segs, pos);
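/* uncached read path: issue rsize-limited SMB reads and copy the
   returned data directly into the user iovec */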
1694 static ssize_t
1695 cifs_iovec_read(struct file *file, const struct iovec *iov,
1696 unsigned long nr_segs, loff_t *poffset)
1698 int rc;
1699 int xid;
1700 ssize_t total_read;
1701 unsigned int bytes_read = 0;
1702 size_t len, cur_len;
1703 int iov_offset = 0;
1704 struct cifs_sb_info *cifs_sb;
1705 struct cifs_tcon *pTcon;
1706 struct cifsFileInfo *open_file;
1707 struct smb_com_read_rsp *pSMBr;
1708 struct cifs_io_parms io_parms;
1709 char *read_data;
1710 __u32 pid;
1712 if (!nr_segs)
1713 return 0;
1715 len = iov_length(iov, nr_segs);
1716 if (!len)
1717 return 0;
1719 xid = GetXid();
1720 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1722 open_file = file->private_data;
1723 pTcon = tlink_tcon(open_file->tlink);
1725 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1726 pid = open_file->pid;
1727 else
1728 pid = current->tgid;
1730 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1731 cFYI(1, "attempting read on write only file instance");
1733 for (total_read = 0; total_read < len; total_read += bytes_read) {
1734 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1735 rc = -EAGAIN;
1736 read_data = NULL;
1738 while (rc == -EAGAIN) {
1739 int buf_type = CIFS_NO_BUFFER;
1740 if (open_file->invalidHandle) {
1741 rc = cifs_reopen_file(open_file, true);
1742 if (rc != 0)
1743 break;
1745 io_parms.netfid = open_file->netfid;
1746 io_parms.pid = pid;
1747 io_parms.tcon = pTcon;
1748 io_parms.offset = *poffset;
1749 io_parms.length = cur_len;
1750 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1751 &read_data, &buf_type);
1752 pSMBr = (struct smb_com_read_rsp *)read_data;
1753 if (read_data) {
1754 char *data_offset = read_data + 4 +
1755 le16_to_cpu(pSMBr->DataOffset);
1756 if (memcpy_toiovecend(iov, data_offset,
1757 iov_offset, bytes_read))
1758 rc = -EFAULT;
1759 if (buf_type == CIFS_SMALL_BUFFER)
1760 cifs_small_buf_release(read_data);
1761 else if (buf_type == CIFS_LARGE_BUFFER)
1762 cifs_buf_release(read_data);
1763 read_data = NULL;
1764 iov_offset += bytes_read;
1768 if (rc || (bytes_read == 0)) {
1769 if (total_read) {
1770 break;
1771 } else {
1772 FreeXid(xid);
1773 return rc;
1775 } else {
1776 cifs_stats_bytes_read(pTcon, bytes_read);
1777 *poffset += bytes_read;
1781 FreeXid(xid);
1782 return total_read;
1785 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
1786 unsigned long nr_segs, loff_t pos)
1788 ssize_t read;
1790 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1791 if (read > 0)
1792 iocb->ki_pos = pos;
1794 return read;
1797 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1798 unsigned long nr_segs, loff_t pos)
1800 struct inode *inode;
1802 inode = iocb->ki_filp->f_path.dentry->d_inode;
1804 if (CIFS_I(inode)->clientCanCacheRead)
1805 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1808 * In strict cache mode we need to read from the server all the time
1809 * if we don't have a level II oplock, because the server can delay the
1810 * mtime change - so we can't make a decision about invalidating the inode.
1811 * We can also fail when reading pages if there are mandatory locks
1812 * on pages affected by this read but not on the region from pos to
1813 * pos+len-1.
1816 return cifs_user_readv(iocb, iov, nr_segs, pos);
1819 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1820 loff_t *poffset)
1822 int rc = -EACCES;
1823 unsigned int bytes_read = 0;
1824 unsigned int total_read;
1825 unsigned int current_read_size;
1826 struct cifs_sb_info *cifs_sb;
1827 struct cifs_tcon *pTcon;
1828 int xid;
1829 char *current_offset;
1830 struct cifsFileInfo *open_file;
1831 struct cifs_io_parms io_parms;
1832 int buf_type = CIFS_NO_BUFFER;
1833 __u32 pid;
1835 xid = GetXid();
1836 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1838 if (file->private_data == NULL) {
1839 rc = -EBADF;
1840 FreeXid(xid);
1841 return rc;
1843 open_file = file->private_data;
1844 pTcon = tlink_tcon(open_file->tlink);
1846 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1847 pid = open_file->pid;
1848 else
1849 pid = current->tgid;
1851 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1852 cFYI(1, "attempting read on write only file instance");
1854 for (total_read = 0, current_offset = read_data;
1855 read_size > total_read;
1856 total_read += bytes_read, current_offset += bytes_read) {
1857 current_read_size = min_t(const int, read_size - total_read,
1858 cifs_sb->rsize);
1859 /* For Windows ME and 9x we do not want to request more
1860 than was negotiated since the server will refuse the read then */
1861 if ((pTcon->ses) &&
1862 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1863 current_read_size = min_t(const int, current_read_size,
1864 pTcon->ses->server->maxBuf - 128);
1866 rc = -EAGAIN;
1867 while (rc == -EAGAIN) {
1868 if (open_file->invalidHandle) {
1869 rc = cifs_reopen_file(open_file, true);
1870 if (rc != 0)
1871 break;
1873 io_parms.netfid = open_file->netfid;
1874 io_parms.pid = pid;
1875 io_parms.tcon = pTcon;
1876 io_parms.offset = *poffset;
1877 io_parms.length = current_read_size;
1878 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1879 &current_offset, &buf_type);
1881 if (rc || (bytes_read == 0)) {
1882 if (total_read) {
1883 break;
1884 } else {
1885 FreeXid(xid);
1886 return rc;
1888 } else {
1889 cifs_stats_bytes_read(pTcon, total_read);
1890 *poffset += bytes_read;
1893 FreeXid(xid);
1894 return total_read;
1898 * If the page is mmap'ed into a process' page tables, then we need to make
1899 * sure that it doesn't change while being written back.
1901 static int
1902 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1904 struct page *page = vmf->page;
1906 lock_page(page);
1907 return VM_FAULT_LOCKED;
1910 static struct vm_operations_struct cifs_file_vm_ops = {
1911 .fault = filemap_fault,
1912 .page_mkwrite = cifs_page_mkwrite,
1915 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1917 int rc, xid;
1918 struct inode *inode = file->f_path.dentry->d_inode;
1920 xid = GetXid();
1922 if (!CIFS_I(inode)->clientCanCacheRead) {
1923 rc = cifs_invalidate_mapping(inode);
1924 if (rc) {
1925 FreeXid(xid);
1926 return rc;
1928 rc = generic_file_mmap(file, vma);
1929 if (rc == 0)
1930 vma->vm_ops = &cifs_file_vm_ops;
1931 FreeXid(xid);
1932 return rc;
1935 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1937 int rc, xid;
1939 xid = GetXid();
1940 rc = cifs_revalidate_file(file);
1941 if (rc) {
1942 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1943 FreeXid(xid);
1944 return rc;
1946 rc = generic_file_mmap(file, vma);
1947 if (rc == 0)
1948 vma->vm_ops = &cifs_file_vm_ops;
1949 FreeXid(xid);
1950 return rc;
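/* copy SMB read response data into the page cache pages from the
   readahead list, zero-filling the tail of a partial last page */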
1954 static void cifs_copy_cache_pages(struct address_space *mapping,
1955 struct list_head *pages, int bytes_read, char *data)
1957 struct page *page;
1958 char *target;
1960 while (bytes_read > 0) {
1961 if (list_empty(pages))
1962 break;
1964 page = list_entry(pages->prev, struct page, lru);
1965 list_del(&page->lru);
1967 if (add_to_page_cache_lru(page, mapping, page->index,
1968 GFP_KERNEL)) {
1969 page_cache_release(page);
1970 cFYI(1, "Add page cache failed");
1971 data += PAGE_CACHE_SIZE;
1972 bytes_read -= PAGE_CACHE_SIZE;
1973 continue;
1975 page_cache_release(page);
1977 target = kmap_atomic(page, KM_USER0);
1979 if (PAGE_CACHE_SIZE > bytes_read) {
1980 memcpy(target, data, bytes_read);
1981 /* zero the tail end of this partial page */
1982 memset(target + bytes_read, 0,
1983 PAGE_CACHE_SIZE - bytes_read);
1984 bytes_read = 0;
1985 } else {
1986 memcpy(target, data, PAGE_CACHE_SIZE);
1987 bytes_read -= PAGE_CACHE_SIZE;
1989 kunmap_atomic(target, KM_USER0);
1991 flush_dcache_page(page);
1992 SetPageUptodate(page);
1993 unlock_page(page);
1994 data += PAGE_CACHE_SIZE;
1996 /* add page to FS-Cache */
1997 cifs_readpage_to_fscache(mapping->host, page);
1999 return;
2002 static int cifs_readpages(struct file *file, struct address_space *mapping,
2003 struct list_head *page_list, unsigned num_pages)
2005 int rc = -EACCES;
2006 int xid;
2007 loff_t offset;
2008 struct page *page;
2009 struct cifs_sb_info *cifs_sb;
2010 struct cifs_tcon *pTcon;
2011 unsigned int bytes_read = 0;
2012 unsigned int read_size, i;
2013 char *smb_read_data = NULL;
2014 struct smb_com_read_rsp *pSMBr;
2015 struct cifsFileInfo *open_file;
2016 struct cifs_io_parms io_parms;
2017 int buf_type = CIFS_NO_BUFFER;
2018 __u32 pid;
2020 xid = GetXid();
2021 if (file->private_data == NULL) {
2022 rc = -EBADF;
2023 FreeXid(xid);
2024 return rc;
2026 open_file = file->private_data;
2027 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2028 pTcon = tlink_tcon(open_file->tlink);
2031 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2032 * immediately if the cookie is negative
2034 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2035 &num_pages);
2036 if (rc == 0)
2037 goto read_complete;
2039 cFYI(DBG2, "rpages: num pages %d", num_pages);
2040 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2041 pid = open_file->pid;
2042 else
2043 pid = current->tgid;
2045 for (i = 0; i < num_pages; ) {
2046 unsigned contig_pages;
2047 struct page *tmp_page;
2048 unsigned long expected_index;
2050 if (list_empty(page_list))
2051 break;
2053 page = list_entry(page_list->prev, struct page, lru);
2054 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2056 /* count adjacent pages that we will read into */
2057 contig_pages = 0;
2058 expected_index =
2059 list_entry(page_list->prev, struct page, lru)->index;
2060 list_for_each_entry_reverse(tmp_page, page_list, lru) {
2061 if (tmp_page->index == expected_index) {
2062 contig_pages++;
2063 expected_index++;
2064 } else
2065 break;
2067 if (contig_pages + i > num_pages)
2068 contig_pages = num_pages - i;
2070 /* for reads over a certain size could initiate async
2071 read ahead */
2073 read_size = contig_pages * PAGE_CACHE_SIZE;
2074 /* Read size needs to be in multiples of one page */
2075 read_size = min_t(const unsigned int, read_size,
2076 cifs_sb->rsize & PAGE_CACHE_MASK);
2077 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2078 read_size, contig_pages);
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = offset;
			io_parms.length = read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &smb_read_data, &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(
							smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld. "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}
	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
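/*
 * Read a single page into the page cache: try FS-Cache first, then fall
 * back to a synchronous cifs_read(), zeroing any tail of the page beyond
 * what the server returned.
 */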
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
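/*
 * The address_space ->readpage() method: check for a valid file handle,
 * fill the page via cifs_readpage_worker(), and unlock it.
 */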
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
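/*
 * Return 1 if any open file on this inode was opened with write access,
 * i.e. there may be dirty pages that writepage could extend the file with.
 */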
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}
/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file.  In the
 * future we could consider allowing refreshing the inode only on increases
 * in the file size, but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
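/*
 * The address_space ->write_begin() method: lock the target page and,
 * unless the write covers the whole page or the cached contents will not
 * be read back, bring the page up to date before the copy-in.
 */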
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
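/*
 * Pages carrying private data are still in use, so refuse to release
 * them; otherwise let FS-Cache decide whether the page can go.
 */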
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}
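/*
 * ->invalidatepage(): only a whole-page invalidation (offset 0) needs to
 * evict the page from FS-Cache as well.
 */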
static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
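/*
 * ->launder_page(): synchronously write back a dirty page before it is
 * invalidated, then drop any FS-Cache copy.
 */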
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
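/*
 * Work handler run when the server breaks an oplock: flush cached data
 * (and invalidate it if the read oplock is gone), then acknowledge the
 * break to the server unless the break was cancelled.
 */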
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session using a now incorrect file handle is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected, since the server has already
	 * released the oplock.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing a reference for us. Make sure it's done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}
/* must be called while holding cifs_file_list_lock */
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
	cifs_sb_active(cfile->dentry->d_sb);
	cifsFileInfo_get(cfile);
}
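/* drop the references taken by cifs_oplock_break_get() */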
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}
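/*
 * Standard address_space operations, used when the server supports a
 * buffer large enough for cifs_readpages (see the note below for the
 * small-buffer variant).
 */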
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};