/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
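
/*
 * Map the O_ACCMODE portion of the POSIX open flags onto the NT/CIFS
 * desired-access bits requested in the SMB open.
 */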
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
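
/*
 * Map POSIX open flags onto the flag bits used for a POSIX (CIFS Unix
 * extensions) open; the create-time flags are deliberately skipped since
 * they only had an effect on the original open.
 */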
static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
{
	fmode_t posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = FMODE_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = FMODE_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		posix_flags = FMODE_READ | FMODE_WRITE;
	}
	/* can not map O_CREAT or O_EXCL or O_TRUNC flags when
	   reopening a file.  They had their effect on the original open */
	if (flags & O_APPEND)
		posix_flags |= (fmode_t)O_APPEND;
	if (flags & O_DSYNC)
		posix_flags |= (fmode_t)O_DSYNC;
	if (flags & __O_SYNC)
		posix_flags |= (fmode_t)__O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= (fmode_t)O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= (fmode_t)O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= (fmode_t)O_DIRECT;

	return posix_flags;
}
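
/*
 * Translate the O_CREAT/O_EXCL/O_TRUNC combinations into the SMB create
 * disposition (see the mapping table in cifs_open below).
 */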
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
			     struct cifsInodeInfo *pCifsInode,
			     struct cifsFileInfo *pCifsFile, __u32 oplock,
			     u16 netfid)
{

	write_lock(&GlobalSMBSeslock);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	if (pCifsInode == NULL) {
		write_unlock(&GlobalSMBSeslock);
		return -EINVAL;
	}

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto psx_client_can_cache;
	}

	/* BB FIXME need to fix this check to move it earlier into posix_open
	   BB fix following section BB FIXME */

	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
			   (file->f_path.dentry->d_inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, ("inode unchanged on server"));
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	} */

psx_client_can_cache:
	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, ("Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode));
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	/* will have to change the unlock if we reenable the
	   filemap_fdatawrite (which does not seem necessary) */
	write_unlock(&GlobalSMBSeslock);
	return 0;
}
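
/*
 * Search the inode's open file list for an entry created for this process
 * (e.g. by cifs_create) that has no struct file attached yet, and wire it
 * up to file->private_data.
 */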
static struct cifsFileInfo *
cifs_fill_filedata(struct file *file)
{
	struct list_head *tmp;
	struct cifsFileInfo *pCifsFile = NULL;
	struct cifsInodeInfo *pCifsInode = NULL;

	/* search inode for this file and fill in file->private_data */
	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	read_lock(&GlobalSMBSeslock);
	list_for_each(tmp, &pCifsInode->openFileList) {
		pCifsFile = list_entry(tmp, struct cifsFileInfo, flist);
		if ((pCifsFile->pfile == NULL) &&
		    (pCifsFile->pid == current->tgid)) {
			/* mode set in cifs_create */

			/* needed for writepage */
			pCifsFile->pfile = file;
			file->private_data = pCifsFile;
			break;
		}
	}
	read_unlock(&GlobalSMBSeslock);

	if (file->private_data != NULL) {
		return pCifsFile;
	} else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
		cERROR(1, ("could not find file instance for "
			   "new file %p", file));
	return NULL;
}
/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct timespec temp;
	int rc;

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
			   (file->f_path.dentry->d_inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, ("inode unchanged on server"));
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
					      full_path, inode->i_sb, xid);
	else
		rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
					 full_path, buf, inode->i_sb, xid, NULL);

	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, ("Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode));
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	return rc;
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	pCifsFile = cifs_fill_filedata(file);
	if (pCifsFile) {
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, ("posix open succeeded"));
			/* no need for special case handling of setting mode
			   on read only files needed here */

			pCifsFile = cifs_fill_filedata(file);
			cifs_posix_open_inode_helper(inode, file, pCifsInode,
						     pCifsFile, oplock, netfid);
			goto out;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, ("server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS));
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, ("cifs_open returned 0x%x", rc));
		goto out;
	}

	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
					file->f_flags);
	file->private_data = pCifsFile;
	if (file->private_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
				    &oplock, buf, full_path, xid);

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (tcon->unix_ext) {
			struct cifs_unix_set_info_args args = {
				.mode	= inode->i_mode,
				.uid	= NO_CHANGE_64,
				.gid	= NO_CHANGE_64,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
					       cifs_sb->local_nls,
					       cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
static int cifs_reopen_file(struct file *file, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = (struct cifsFileInfo *)file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, ("no valid name if dentry freed"));
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, ("inode not valid"));
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path));

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, ("posix reopen succeeded"));
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, ("cifs_open returned 0x%x", rc));
		cFYI(1, ("oplock: %d", oplock));
	} else {
reopen_success:
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = false;
		mutex_unlock(&pCifsFile->fh_mutex);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = false;
				pCifsInode->clientCanCacheRead = false;
				if (tcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid, NULL);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = true;
				pCifsInode->clientCanCacheRead = true;
				cFYI(1, ("Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode));
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = true;
				pCifsInode->clientCanCacheAll = false;
			} else {
				pCifsInode->clientCanCacheRead = false;
				pCifsInode->clientCanCacheAll = false;
			}
			cifs_relock_file(pCifsFile);
		}
	}
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile =
		(struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;
		write_lock(&GlobalSMBSeslock);
		pSMBFile->closePend = true;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (!pTcon->need_reconnect) {
				write_unlock(&GlobalSMBSeslock);
				timeout = 2;
				while ((atomic_read(&pSMBFile->count) != 1)
					&& (timeout <= 2048)) {
					/* Give write a better chance to get to
					   server ahead of the close.  We do not
					   want to add a wait_q here as it would
					   increase the memory utilization as
					   the struct would be in each open file,
					   but this should give enough time to
					   clear the socket */
					cFYI(DBG2,
						("close delay, write pending"));
					msleep(timeout);
					timeout *= 4;
				}
				if (!pTcon->need_reconnect &&
				    !pSMBFile->invalidHandle)
					rc = CIFSSMBClose(xid, pTcon,
							  pSMBFile->netfid);
			} else
				write_unlock(&GlobalSMBSeslock);
		} else
			write_unlock(&GlobalSMBSeslock);

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, ("closing last open instance for inode %p", inode));
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = false;
		CIFS_I(inode)->clientCanCacheAll = false;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct =
		(struct cifsFileInfo *)file->private_data;
	char *ptmp;

	cFYI(1, ("Closedir inode = 0x%p", inode));

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb =
			CIFS_SB(file->f_path.dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, ("Freeing private data in close dir"));
		write_lock(&GlobalSMBSeslock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, ("Closing uncompleted readdir with rc %d",
				 rc));
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			write_unlock(&GlobalSMBSeslock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, ("closedir free smb buf in srch struct"));
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
				__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
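
/*
 * Lock entry point: F_GETLK is probed with a lock/unlock pair; set
 * requests use POSIX byte-range locks when the server advertises the
 * Unix extensions FCNTL capability, otherwise Windows LOCKING_ANDX
 * requests (which must be stored locally so they can be unlocked later).
 */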
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, ("Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end));

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, ("Posix"));
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, ("Flock"));
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, ("Blocking lock"));
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, ("Process suspended by mandatory locking - "
			 "not implemented yet"));
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, ("Lease on file - not implemented yet"));
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, ("F_WRLCK "));
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, ("F_UNLCK"));
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, ("F_RDLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, ("F_EXLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, ("F_SHLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, ("Unknown type of lock"));

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */,
					 0 /* numLock */, lockType,
					 0 /* wait flag */);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, ("Error unlocking previously locked "
					   "range %d during test of lock", rc));
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, ("Error unlocking "
						"previously locked range %d "
						"during test of lock", rc));
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid =
			(struct cifsFileInfo *)file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start,
					0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, false);
					if (stored_rc)
						rc = stored_rc;

					list_del(&li->llist);
					kfree(li);
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
/*
 * Set the timeout on write requests past EOF. For some servers (Windows)
 * these calls can be very long.
 *
 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
 * The 10M cutoff is totally arbitrary. A better scheme for this would be
 * welcome if someone wants to suggest one.
 *
 * We may be able to do a better job with this if there were some way to
 * declare that a file should be sparse.
 */
static int
cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}
/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
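
/*
 * Write from a user-space buffer, looping over wsize-limited SMB writes
 * and retrying on -EAGAIN (reopening an invalidated handle as needed).
 */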
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	/* cFYI(1,
	   (" write %d bytes to offset %lld of %s", write_size,
	    *poffset, file->f_path.dentry->d_name.name)); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *) file->private_data;

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
		/* Do not update local mtime - server will set its actual value on write
		 * inode->i_ctime = inode->i_mtime =
		 *	current_fs_time(inode->i_sb); */
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}
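
/*
 * Kernel-buffer variant of the write path; when signing is disabled (or
 * the experimental path is enabled) it avoids a copy by pointing an iovec
 * at the caller's buffer for CIFSSMBWrite2.
 */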
static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
		 *poffset, file->f_path.dentry->d_name.name));

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		/* BB We could make this contingent on superblock ATIME flag too */
		/* file->f_path.dentry->d_inode->i_ctime =
		   file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME; */
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, ("Null inode passed to cifs_writeable_file"));
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend ||
		    (!any_available && open_file->pid != current->tgid))
			continue;

		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, false);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					cifsFileInfo_put(open_file);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			   (we can not do this if closePending since
			   loop could be modified - in which case we
			   have to start at the beginning of the list
			   again. Note that it would be bad
			   to hold up writepages here (rather than
			   in caller) with continuous retries */
			cFYI(1, ("wp failed on reopen file"));
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			cifsFileInfo_put(open_file);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
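
/*
 * Write the byte range [from, to) of a page back to the server using any
 * writable handle found on the owning inode; never extends the file.
 */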
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, ("No writeable filehandles for inode"));
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
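
/*
 * Writeback: batch runs of contiguous dirty pages into a single
 * CIFSSMBWrite2 call (one iovec entry per page, iov[0] reserved for the
 * SMB header), falling back to generic_writepages when wsize or signing
 * rules out the batched path.
 */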
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid, long_op;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
		if (cifs_sb->tcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			if (!experimEnabled)
				return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		kfree(iov);
		return 0;
	}

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, ("No writable handles for inode"));
				rc = -EBADF;
			} else {
				long_op = cifs_write_timeout(cifsi, offset);
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   long_op);
				cifsFileInfo_put(open_file);
				cifs_update_eof(cifsi, offset, bytes_written);

				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, ("Write2 ret %d, wrote %d",
						   rc, bytes_written));
					/* BB what if continued retry is
					   requested via mount flags? */
					if (rc == -ENOSPC)
						set_bit(AS_ENOSPC, &mapping->flags);
					else
						set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				   success rc but too little data written? */
				/* BB investigate retry logic on temporary
				   server crash cases and how recovery works
				   when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, ("ppw - page not up to date"));

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
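
/*
 * Called after data has been copied into the page: a page that is not
 * fully up to date is written out synchronously via cifs_write, while an
 * up-to-date page is simply marked dirty for later writeback.
 */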
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied));

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile =
		(struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
		 dentry->d_name.name, datasync));

	rc = filemap_write_and_wait(inode->i_mapping);
	if (rc == 0) {
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
		tcon = CIFS_SB(inode->i_sb)->tcon;
		if (!rc && tcon && smbfile &&
		   !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
	}

	FreeXid(xid);
	return rc;
}
/* static void cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, ("sync page %p", page));
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));

#if 0
	if (rc < 0)
		return rc;
	return 0;
#endif
} */
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	/* Rather than do the steps manually:
	   lock the inode for writing
	   loop through pages looking for write behind data (dirty pages)
	   coalesce into contiguous 16K (or smaller) chunks to write to server
	   send to server (prefer in parallel)
	   deal with writebehind errors
	   unlock inode for writing
	   filemapfdatawrite appears easier for the time being */

	rc = filemap_fdatawrite(inode->i_mapping);
	/* reset wb rc if we were able to write out dirty pages */
	if (!rc) {
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
	}

	cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));

	return rc;
}
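
/*
 * Read into a user-space buffer, looping over rsize-limited SMB reads and
 * copying out of the returned SMB buffer with copy_to_user.
 */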
ssize_t cifs_user_read(struct file *file, char __user *read_data,
	size_t read_size, loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read = 0;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	char *smb_read_data;
	char __user *current_offset;
	struct smb_com_read_rsp *pSMBr;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		rc = -EAGAIN;
		smb_read_data = NULL;
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			if (smb_read_data) {
				if (copy_to_user(current_offset,
						smb_read_data +
						4 /* RFC1001 length field */ +
						le16_to_cpu(pSMBr->DataOffset),
						bytes_read))
					rc = -EFAULT;

				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(smb_read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(smb_read_data);
				smb_read_data = NULL;
			}
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
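
/*
 * Kernel-buffer read path; here CIFSSMBRead is passed the caller's buffer
 * directly, so no intermediate SMB buffer needs to be released.
 */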
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
	loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}
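
/*
 * Copy SMB read data into pages taken from the readahead page list,
 * zero-filling the tail of a partial page, then add the pages to the page
 * cache and LRU.
 */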
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add_file(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
	return;
}
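/*
 * ->readpages handler: coalesce runs of adjacent pages from the readahead
 * list into SMB reads of up to rsize bytes, then hand the returned data to
 * cifs_copy_cache_pages() to populate the page cache.
 */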
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);
	cFYI(DBG2, ("rpages: num pages %d", num_pages));
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages));
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, true);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld. "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add_file(&lru_pvec);

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}
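/* Fill a single page from the server via cifs_read(), zeroing any tail
   beyond the bytes actually returned before marking the page uptodate. */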
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}
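/* ->readpage handler: wrapper around cifs_readpage_worker() that unlocks
   the page once the read has completed (or failed). */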
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
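/* Return 1 if at least one open file instance on this inode (not pending
   close) was opened for write, 0 otherwise. */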
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			read_unlock(&GlobalSMBSeslock);
			return 1;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return 0;
}
/* We do not want to update the file size from the server for inodes
   open for write, to avoid races with writepage extending the file.
   In the future we could consider allowing refreshing the inode only
   on increases in the file size, but this is tricky to do without
   racing with writebehind page caching in the current Linux kernel
   design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
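/*
 * ->write_begin handler: grab the cache page for the write and, when the
 * write is partial and the page is not already uptodate, read the existing
 * data in (or zero it when an oplock tells us the read can be skipped).
 */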
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
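/*
 * slow_work handler for an oplock break: flush dirty pages, and when the
 * read cache is being lost also wait for the writes and invalidate the
 * cached data, then acknowledge the break to the server.
 */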
static void
cifs_oplock_break(struct slow_work *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->pInode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
	int rc, waitrc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
		if (cinode->clientCanCacheAll == 0)
			break_lease(inode, O_RDONLY);
		else if (cinode->clientCanCacheRead == 0)
			break_lease(inode, O_WRONLY);
#endif
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			waitrc = filemap_fdatawait(inode->i_mapping);
			invalidate_remote_inode(inode);
		}
		if (!rc)
			rc = waitrc;
		if (rc)
			cinode->write_behind_rc = rc;
		cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
	}

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server.
	 */
	if (!cfile->closePend && !cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false);
		cFYI(1, ("Oplock release rc = %d", rc));
	}
}
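/* Reference helpers for the oplock break slow_work: pin the mount and the
   file instance while the work item is queued and executing. */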
static int
cifs_oplock_break_get(struct slow_work *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	mntget(cfile->mnt);
	cifsFileInfo_get(cfile);
	return 0;
}

static void
cifs_oplock_break_put(struct slow_work *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	mntput(cfile->mnt);
	cifsFileInfo_put(cfile);
}

const struct slow_work_ops cifs_oplock_break_ops = {
	.get_ref = cifs_oplock_break_get,
	.put_ref = cifs_oplock_break_put,
	.execute = cifs_oplock_break,
};
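/* Address space operations used when the server's negotiated buffer is
   large enough for cifs_readpages (see cifs_addr_ops_smallbuf below). */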
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};