4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
45 static inline int cifs_convert_flags(unsigned int flags
)
47 if ((flags
& O_ACCMODE
) == O_RDONLY
)
49 else if ((flags
& O_ACCMODE
) == O_WRONLY
)
51 else if ((flags
& O_ACCMODE
) == O_RDWR
) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ
| GENERIC_WRITE
);
58 return (READ_CONTROL
| FILE_WRITE_ATTRIBUTES
| FILE_READ_ATTRIBUTES
|
59 FILE_WRITE_EA
| FILE_APPEND_DATA
| FILE_WRITE_DATA
|
63 static inline fmode_t
cifs_posix_convert_flags(unsigned int flags
)
65 fmode_t posix_flags
= 0;
67 if ((flags
& O_ACCMODE
) == O_RDONLY
)
68 posix_flags
= FMODE_READ
;
69 else if ((flags
& O_ACCMODE
) == O_WRONLY
)
70 posix_flags
= FMODE_WRITE
;
71 else if ((flags
& O_ACCMODE
) == O_RDWR
) {
72 /* GENERIC_ALL is too much permission to request
73 can cause unnecessary access denied on create */
74 /* return GENERIC_ALL; */
75 posix_flags
= FMODE_READ
| FMODE_WRITE
;
77 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
78 reopening a file. They had their effect on the original open */
80 posix_flags
|= (fmode_t
)O_APPEND
;
82 posix_flags
|= (fmode_t
)O_DSYNC
;
84 posix_flags
|= (fmode_t
)__O_SYNC
;
85 if (flags
& O_DIRECTORY
)
86 posix_flags
|= (fmode_t
)O_DIRECTORY
;
87 if (flags
& O_NOFOLLOW
)
88 posix_flags
|= (fmode_t
)O_NOFOLLOW
;
90 posix_flags
|= (fmode_t
)O_DIRECT
;
95 static inline int cifs_get_disposition(unsigned int flags
)
97 if ((flags
& (O_CREAT
| O_EXCL
)) == (O_CREAT
| O_EXCL
))
99 else if ((flags
& (O_CREAT
| O_TRUNC
)) == (O_CREAT
| O_TRUNC
))
100 return FILE_OVERWRITE_IF
;
101 else if ((flags
& O_CREAT
) == O_CREAT
)
103 else if ((flags
& O_TRUNC
) == O_TRUNC
)
104 return FILE_OVERWRITE
;
109 /* all arguments to this function must be checked for validity in caller */
111 cifs_posix_open_inode_helper(struct inode
*inode
, struct file
*file
,
112 struct cifsInodeInfo
*pCifsInode
, __u32 oplock
,
116 write_lock(&GlobalSMBSeslock
);
118 pCifsInode
= CIFS_I(file
->f_path
.dentry
->d_inode
);
119 if (pCifsInode
== NULL
) {
120 write_unlock(&GlobalSMBSeslock
);
124 if (pCifsInode
->clientCanCacheRead
) {
125 /* we have the inode open somewhere else
126 no need to discard cache data */
127 goto psx_client_can_cache
;
131 /* if not oplocked, invalidate inode pages if mtime or file
133 /* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
134 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
135 (file->f_path.dentry->d_inode->i_size ==
136 (loff_t)le64_to_cpu(buf->EndOfFile))) {
137 cFYI(1, "inode unchanged on server");
139 if (file->f_path.dentry->d_inode->i_mapping) {
140 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
142 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
144 cFYI(1, "invalidating remote inode since open detected it "
146 invalidate_remote_inode(file->f_path.dentry->d_inode);
149 psx_client_can_cache
:
150 if ((oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
151 pCifsInode
->clientCanCacheAll
= true;
152 pCifsInode
->clientCanCacheRead
= true;
153 cFYI(1, "Exclusive Oplock granted on inode %p",
154 file
->f_path
.dentry
->d_inode
);
155 } else if ((oplock
& 0xF) == OPLOCK_READ
)
156 pCifsInode
->clientCanCacheRead
= true;
158 /* will have to change the unlock if we reenable the
159 filemap_fdatawrite (which does not seem necessary */
160 write_unlock(&GlobalSMBSeslock
);
164 /* all arguments to this function must be checked for validity in caller */
165 static inline int cifs_open_inode_helper(struct inode
*inode
,
166 struct cifsTconInfo
*pTcon
, int *oplock
, FILE_ALL_INFO
*buf
,
167 char *full_path
, int xid
)
169 struct cifsInodeInfo
*pCifsInode
= CIFS_I(inode
);
170 struct timespec temp
;
173 if (pCifsInode
->clientCanCacheRead
) {
174 /* we have the inode open somewhere else
175 no need to discard cache data */
176 goto client_can_cache
;
179 /* BB need same check in cifs_create too? */
180 /* if not oplocked, invalidate inode pages if mtime or file
182 temp
= cifs_NTtimeToUnix(buf
->LastWriteTime
);
183 if (timespec_equal(&inode
->i_mtime
, &temp
) &&
185 (loff_t
)le64_to_cpu(buf
->EndOfFile
))) {
186 cFYI(1, "inode unchanged on server");
188 if (inode
->i_mapping
) {
189 /* BB no need to lock inode until after invalidate
190 since namei code should already have it locked? */
191 rc
= filemap_write_and_wait(inode
->i_mapping
);
193 pCifsInode
->write_behind_rc
= rc
;
195 cFYI(1, "invalidating remote inode since open detected it "
197 invalidate_remote_inode(inode
);
202 rc
= cifs_get_inode_info_unix(&inode
, full_path
, inode
->i_sb
,
205 rc
= cifs_get_inode_info(&inode
, full_path
, buf
, inode
->i_sb
,
208 if ((*oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
209 pCifsInode
->clientCanCacheAll
= true;
210 pCifsInode
->clientCanCacheRead
= true;
211 cFYI(1, "Exclusive Oplock granted on inode %p", inode
);
212 } else if ((*oplock
& 0xF) == OPLOCK_READ
)
213 pCifsInode
->clientCanCacheRead
= true;
218 int cifs_open(struct inode
*inode
, struct file
*file
)
223 struct cifs_sb_info
*cifs_sb
;
224 struct cifsTconInfo
*tcon
;
225 struct cifsFileInfo
*pCifsFile
= NULL
;
226 struct cifsInodeInfo
*pCifsInode
;
227 char *full_path
= NULL
;
231 FILE_ALL_INFO
*buf
= NULL
;
235 cifs_sb
= CIFS_SB(inode
->i_sb
);
236 tcon
= cifs_sb
->tcon
;
238 pCifsInode
= CIFS_I(file
->f_path
.dentry
->d_inode
);
240 full_path
= build_path_from_dentry(file
->f_path
.dentry
);
241 if (full_path
== NULL
) {
246 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
247 inode
, file
->f_flags
, full_path
);
254 if (!tcon
->broken_posix_open
&& tcon
->unix_ext
&&
255 (tcon
->ses
->capabilities
& CAP_UNIX
) &&
256 (CIFS_UNIX_POSIX_PATH_OPS_CAP
&
257 le64_to_cpu(tcon
->fsUnixInfo
.Capability
))) {
258 int oflags
= (int) cifs_posix_convert_flags(file
->f_flags
);
259 oflags
|= SMB_O_CREAT
;
260 /* can not refresh inode info since size could be stale */
261 rc
= cifs_posix_open(full_path
, &inode
, inode
->i_sb
,
262 cifs_sb
->mnt_file_mode
/* ignored */,
263 oflags
, &oplock
, &netfid
, xid
);
265 cFYI(1, "posix open succeeded");
266 /* no need for special case handling of setting mode
267 on read only files needed here */
269 rc
= cifs_posix_open_inode_helper(inode
, file
,
270 pCifsInode
, oplock
, netfid
);
272 CIFSSMBClose(xid
, tcon
, netfid
);
276 pCifsFile
= cifs_new_fileinfo(inode
, netfid
, file
,
279 if (pCifsFile
== NULL
) {
280 CIFSSMBClose(xid
, tcon
, netfid
);
284 cifs_fscache_set_inode_cookie(inode
, file
);
287 } else if ((rc
== -EINVAL
) || (rc
== -EOPNOTSUPP
)) {
288 if (tcon
->ses
->serverNOS
)
289 cERROR(1, "server %s of type %s returned"
290 " unexpected error on SMB posix open"
291 ", disabling posix open support."
292 " Check if server update available.",
293 tcon
->ses
->serverName
,
294 tcon
->ses
->serverNOS
);
295 tcon
->broken_posix_open
= true;
296 } else if ((rc
!= -EIO
) && (rc
!= -EREMOTE
) &&
297 (rc
!= -EOPNOTSUPP
)) /* path not found or net err */
299 /* else fallthrough to retry open the old way on network i/o
303 desiredAccess
= cifs_convert_flags(file
->f_flags
);
305 /*********************************************************************
306 * open flag mapping table:
308 * POSIX Flag CIFS Disposition
309 * ---------- ----------------
310 * O_CREAT FILE_OPEN_IF
311 * O_CREAT | O_EXCL FILE_CREATE
312 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
313 * O_TRUNC FILE_OVERWRITE
314 * none of the above FILE_OPEN
316 * Note that there is not a direct match between disposition
317 * FILE_SUPERSEDE (ie create whether or not file exists although
318 * O_CREAT | O_TRUNC is similar but truncates the existing
319 * file rather than creating a new file as FILE_SUPERSEDE does
320 * (which uses the attributes / metadata passed in on open call)
322 *? O_SYNC is a reasonable match to CIFS writethrough flag
323 *? and the read write flags match reasonably. O_LARGEFILE
324 *? is irrelevant because largefile support is always used
325 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
326 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
327 *********************************************************************/
329 disposition
= cifs_get_disposition(file
->f_flags
);
331 /* BB pass O_SYNC flag through on file attributes .. BB */
333 /* Also refresh inode by passing in file_info buf returned by SMBOpen
334 and calling get_inode_info with returned buf (at least helps
335 non-Unix server case) */
337 /* BB we can not do this if this is the second open of a file
338 and the first handle has writebehind data, we might be
339 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
340 buf
= kmalloc(sizeof(FILE_ALL_INFO
), GFP_KERNEL
);
346 if (cifs_sb
->tcon
->ses
->capabilities
& CAP_NT_SMBS
)
347 rc
= CIFSSMBOpen(xid
, tcon
, full_path
, disposition
,
348 desiredAccess
, CREATE_NOT_DIR
, &netfid
, &oplock
, buf
,
349 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
350 & CIFS_MOUNT_MAP_SPECIAL_CHR
);
352 rc
= -EIO
; /* no NT SMB support fall into legacy open below */
355 /* Old server, try legacy style OpenX */
356 rc
= SMBLegacyOpen(xid
, tcon
, full_path
, disposition
,
357 desiredAccess
, CREATE_NOT_DIR
, &netfid
, &oplock
, buf
,
358 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
359 & CIFS_MOUNT_MAP_SPECIAL_CHR
);
362 cFYI(1, "cifs_open returned 0x%x", rc
);
366 rc
= cifs_open_inode_helper(inode
, tcon
, &oplock
, buf
, full_path
, xid
);
370 pCifsFile
= cifs_new_fileinfo(inode
, netfid
, file
, file
->f_path
.mnt
,
371 file
->f_flags
, oplock
);
372 if (pCifsFile
== NULL
) {
377 cifs_fscache_set_inode_cookie(inode
, file
);
379 if (oplock
& CIFS_CREATE_ACTION
) {
380 /* time to set mode which we can not set earlier due to
381 problems creating new read-only files */
382 if (tcon
->unix_ext
) {
383 struct cifs_unix_set_info_args args
= {
384 .mode
= inode
->i_mode
,
387 .ctime
= NO_CHANGE_64
,
388 .atime
= NO_CHANGE_64
,
389 .mtime
= NO_CHANGE_64
,
392 CIFSSMBUnixSetPathInfo(xid
, tcon
, full_path
, &args
,
394 cifs_sb
->mnt_cifs_flags
&
395 CIFS_MOUNT_MAP_SPECIAL_CHR
);
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
417 static int cifs_reopen_file(struct file
*file
, bool can_flush
)
422 struct cifs_sb_info
*cifs_sb
;
423 struct cifsTconInfo
*tcon
;
424 struct cifsFileInfo
*pCifsFile
;
425 struct cifsInodeInfo
*pCifsInode
;
427 char *full_path
= NULL
;
429 int disposition
= FILE_OPEN
;
432 if (file
->private_data
)
433 pCifsFile
= file
->private_data
;
438 mutex_lock(&pCifsFile
->fh_mutex
);
439 if (!pCifsFile
->invalidHandle
) {
440 mutex_unlock(&pCifsFile
->fh_mutex
);
446 if (file
->f_path
.dentry
== NULL
) {
447 cERROR(1, "no valid name if dentry freed");
450 goto reopen_error_exit
;
453 inode
= file
->f_path
.dentry
->d_inode
;
455 cERROR(1, "inode not valid");
458 goto reopen_error_exit
;
461 cifs_sb
= CIFS_SB(inode
->i_sb
);
462 tcon
= cifs_sb
->tcon
;
464 /* can not grab rename sem here because various ops, including
465 those that already have the rename sem can end up causing writepage
466 to get called and if the server was down that means we end up here,
467 and we can never tell if the caller already has the rename_sem */
468 full_path
= build_path_from_dentry(file
->f_path
.dentry
);
469 if (full_path
== NULL
) {
472 mutex_unlock(&pCifsFile
->fh_mutex
);
477 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
478 inode
, file
->f_flags
, full_path
);
485 if (tcon
->unix_ext
&& (tcon
->ses
->capabilities
& CAP_UNIX
) &&
486 (CIFS_UNIX_POSIX_PATH_OPS_CAP
&
487 le64_to_cpu(tcon
->fsUnixInfo
.Capability
))) {
488 int oflags
= (int) cifs_posix_convert_flags(file
->f_flags
);
489 /* can not refresh inode info since size could be stale */
490 rc
= cifs_posix_open(full_path
, NULL
, inode
->i_sb
,
491 cifs_sb
->mnt_file_mode
/* ignored */,
492 oflags
, &oplock
, &netfid
, xid
);
494 cFYI(1, "posix reopen succeeded");
497 /* fallthrough to retry open the old way on errors, especially
498 in the reconnect path it is important to retry hard */
501 desiredAccess
= cifs_convert_flags(file
->f_flags
);
503 /* Can not refresh inode by passing in file_info buf to be returned
504 by SMBOpen and then calling get_inode_info with returned buf
505 since file might have write behind data that needs to be flushed
506 and server version of file size can be stale. If we knew for sure
507 that inode was not dirty locally we could do this */
509 rc
= CIFSSMBOpen(xid
, tcon
, full_path
, disposition
, desiredAccess
,
510 CREATE_NOT_DIR
, &netfid
, &oplock
, NULL
,
511 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
&
512 CIFS_MOUNT_MAP_SPECIAL_CHR
);
514 mutex_unlock(&pCifsFile
->fh_mutex
);
515 cFYI(1, "cifs_open returned 0x%x", rc
);
516 cFYI(1, "oplock: %d", oplock
);
519 pCifsFile
->netfid
= netfid
;
520 pCifsFile
->invalidHandle
= false;
521 mutex_unlock(&pCifsFile
->fh_mutex
);
522 pCifsInode
= CIFS_I(inode
);
525 rc
= filemap_write_and_wait(inode
->i_mapping
);
527 CIFS_I(inode
)->write_behind_rc
= rc
;
528 /* temporarily disable caching while we
529 go to server to get inode info */
530 pCifsInode
->clientCanCacheAll
= false;
531 pCifsInode
->clientCanCacheRead
= false;
533 rc
= cifs_get_inode_info_unix(&inode
,
534 full_path
, inode
->i_sb
, xid
);
536 rc
= cifs_get_inode_info(&inode
,
537 full_path
, NULL
, inode
->i_sb
,
539 } /* else we are writing out data to server already
540 and could deadlock if we tried to flush data, and
541 since we do not know if we have data that would
542 invalidate the current end of file on the server
543 we can not go to the server to get the new inod
545 if ((oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
546 pCifsInode
->clientCanCacheAll
= true;
547 pCifsInode
->clientCanCacheRead
= true;
548 cFYI(1, "Exclusive Oplock granted on inode %p",
549 file
->f_path
.dentry
->d_inode
);
550 } else if ((oplock
& 0xF) == OPLOCK_READ
) {
551 pCifsInode
->clientCanCacheRead
= true;
552 pCifsInode
->clientCanCacheAll
= false;
554 pCifsInode
->clientCanCacheRead
= false;
555 pCifsInode
->clientCanCacheAll
= false;
557 cifs_relock_file(pCifsFile
);
565 int cifs_close(struct inode
*inode
, struct file
*file
)
569 struct cifs_sb_info
*cifs_sb
;
570 struct cifsTconInfo
*pTcon
;
571 struct cifsFileInfo
*pSMBFile
= file
->private_data
;
575 cifs_sb
= CIFS_SB(inode
->i_sb
);
576 pTcon
= cifs_sb
->tcon
;
578 struct cifsLockInfo
*li
, *tmp
;
579 write_lock(&GlobalSMBSeslock
);
580 pSMBFile
->closePend
= true;
582 /* no sense reconnecting to close a file that is
584 if (!pTcon
->need_reconnect
) {
585 write_unlock(&GlobalSMBSeslock
);
587 while ((atomic_read(&pSMBFile
->count
) != 1)
588 && (timeout
<= 2048)) {
589 /* Give write a better chance to get to
590 server ahead of the close. We do not
591 want to add a wait_q here as it would
592 increase the memory utilization as
593 the struct would be in each open file,
594 but this should give enough time to
596 cFYI(DBG2
, "close delay, write pending");
600 if (!pTcon
->need_reconnect
&&
601 !pSMBFile
->invalidHandle
)
602 rc
= CIFSSMBClose(xid
, pTcon
,
605 write_unlock(&GlobalSMBSeslock
);
607 write_unlock(&GlobalSMBSeslock
);
609 /* Delete any outstanding lock records.
610 We'll lose them when the file is closed anyway. */
611 mutex_lock(&pSMBFile
->lock_mutex
);
612 list_for_each_entry_safe(li
, tmp
, &pSMBFile
->llist
, llist
) {
613 list_del(&li
->llist
);
616 mutex_unlock(&pSMBFile
->lock_mutex
);
618 write_lock(&GlobalSMBSeslock
);
619 list_del(&pSMBFile
->flist
);
620 list_del(&pSMBFile
->tlist
);
621 write_unlock(&GlobalSMBSeslock
);
622 cifsFileInfo_put(file
->private_data
);
623 file
->private_data
= NULL
;
627 read_lock(&GlobalSMBSeslock
);
628 if (list_empty(&(CIFS_I(inode
)->openFileList
))) {
629 cFYI(1, "closing last open instance for inode %p", inode
);
630 /* if the file is not open we do not know if we can cache info
631 on this inode, much less write behind and read ahead */
632 CIFS_I(inode
)->clientCanCacheRead
= false;
633 CIFS_I(inode
)->clientCanCacheAll
= false;
635 read_unlock(&GlobalSMBSeslock
);
636 if ((rc
== 0) && CIFS_I(inode
)->write_behind_rc
)
637 rc
= CIFS_I(inode
)->write_behind_rc
;
642 int cifs_closedir(struct inode
*inode
, struct file
*file
)
646 struct cifsFileInfo
*pCFileStruct
= file
->private_data
;
649 cFYI(1, "Closedir inode = 0x%p", inode
);
654 struct cifsTconInfo
*pTcon
;
655 struct cifs_sb_info
*cifs_sb
=
656 CIFS_SB(file
->f_path
.dentry
->d_sb
);
658 pTcon
= cifs_sb
->tcon
;
660 cFYI(1, "Freeing private data in close dir");
661 write_lock(&GlobalSMBSeslock
);
662 if (!pCFileStruct
->srch_inf
.endOfSearch
&&
663 !pCFileStruct
->invalidHandle
) {
664 pCFileStruct
->invalidHandle
= true;
665 write_unlock(&GlobalSMBSeslock
);
666 rc
= CIFSFindClose(xid
, pTcon
, pCFileStruct
->netfid
);
667 cFYI(1, "Closing uncompleted readdir with rc %d",
669 /* not much we can do if it fails anyway, ignore rc */
672 write_unlock(&GlobalSMBSeslock
);
673 ptmp
= pCFileStruct
->srch_inf
.ntwrk_buf_start
;
675 cFYI(1, "closedir free smb buf in srch struct");
676 pCFileStruct
->srch_inf
.ntwrk_buf_start
= NULL
;
677 if (pCFileStruct
->srch_inf
.smallBuf
)
678 cifs_small_buf_release(ptmp
);
680 cifs_buf_release(ptmp
);
682 kfree(file
->private_data
);
683 file
->private_data
= NULL
;
685 /* BB can we lock the filestruct while this is going on? */
690 static int store_file_lock(struct cifsFileInfo
*fid
, __u64 len
,
691 __u64 offset
, __u8 lockType
)
693 struct cifsLockInfo
*li
=
694 kmalloc(sizeof(struct cifsLockInfo
), GFP_KERNEL
);
700 mutex_lock(&fid
->lock_mutex
);
701 list_add(&li
->llist
, &fid
->llist
);
702 mutex_unlock(&fid
->lock_mutex
);
706 int cifs_lock(struct file
*file
, int cmd
, struct file_lock
*pfLock
)
712 bool wait_flag
= false;
713 struct cifs_sb_info
*cifs_sb
;
714 struct cifsTconInfo
*tcon
;
716 __u8 lockType
= LOCKING_ANDX_LARGE_FILES
;
717 bool posix_locking
= 0;
719 length
= 1 + pfLock
->fl_end
- pfLock
->fl_start
;
723 cFYI(1, "Lock parm: 0x%x flockflags: "
724 "0x%x flocktype: 0x%x start: %lld end: %lld",
725 cmd
, pfLock
->fl_flags
, pfLock
->fl_type
, pfLock
->fl_start
,
728 if (pfLock
->fl_flags
& FL_POSIX
)
730 if (pfLock
->fl_flags
& FL_FLOCK
)
732 if (pfLock
->fl_flags
& FL_SLEEP
) {
733 cFYI(1, "Blocking lock");
736 if (pfLock
->fl_flags
& FL_ACCESS
)
737 cFYI(1, "Process suspended by mandatory locking - "
738 "not implemented yet");
739 if (pfLock
->fl_flags
& FL_LEASE
)
740 cFYI(1, "Lease on file - not implemented yet");
741 if (pfLock
->fl_flags
&
742 (~(FL_POSIX
| FL_FLOCK
| FL_SLEEP
| FL_ACCESS
| FL_LEASE
)))
743 cFYI(1, "Unknown lock flags 0x%x", pfLock
->fl_flags
);
745 if (pfLock
->fl_type
== F_WRLCK
) {
748 } else if (pfLock
->fl_type
== F_UNLCK
) {
751 /* Check if unlock includes more than
753 } else if (pfLock
->fl_type
== F_RDLCK
) {
755 lockType
|= LOCKING_ANDX_SHARED_LOCK
;
757 } else if (pfLock
->fl_type
== F_EXLCK
) {
760 } else if (pfLock
->fl_type
== F_SHLCK
) {
762 lockType
|= LOCKING_ANDX_SHARED_LOCK
;
765 cFYI(1, "Unknown type of lock");
767 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
768 tcon
= cifs_sb
->tcon
;
770 if (file
->private_data
== NULL
) {
775 netfid
= ((struct cifsFileInfo
*)file
->private_data
)->netfid
;
777 if ((tcon
->ses
->capabilities
& CAP_UNIX
) &&
778 (CIFS_UNIX_FCNTL_CAP
& le64_to_cpu(tcon
->fsUnixInfo
.Capability
)) &&
779 ((cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_NOPOSIXBRL
) == 0))
781 /* BB add code here to normalize offset and length to
782 account for negative length which we can not accept over the
787 if (lockType
& LOCKING_ANDX_SHARED_LOCK
)
788 posix_lock_type
= CIFS_RDLCK
;
790 posix_lock_type
= CIFS_WRLCK
;
791 rc
= CIFSSMBPosixLock(xid
, tcon
, netfid
, 1 /* get */,
793 posix_lock_type
, wait_flag
);
798 /* BB we could chain these into one lock request BB */
799 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
, pfLock
->fl_start
,
800 0, 1, lockType
, 0 /* wait flag */ );
802 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
803 pfLock
->fl_start
, 1 /* numUnlock */ ,
804 0 /* numLock */ , lockType
,
806 pfLock
->fl_type
= F_UNLCK
;
808 cERROR(1, "Error unlocking previously locked "
809 "range %d during test of lock", rc
);
813 /* if rc == ERR_SHARING_VIOLATION ? */
816 if (lockType
& LOCKING_ANDX_SHARED_LOCK
) {
817 pfLock
->fl_type
= F_WRLCK
;
819 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
820 pfLock
->fl_start
, 0, 1,
821 lockType
| LOCKING_ANDX_SHARED_LOCK
,
824 rc
= CIFSSMBLock(xid
, tcon
, netfid
,
825 length
, pfLock
->fl_start
, 1, 0,
827 LOCKING_ANDX_SHARED_LOCK
,
829 pfLock
->fl_type
= F_RDLCK
;
831 cERROR(1, "Error unlocking "
832 "previously locked range %d "
833 "during test of lock", rc
);
836 pfLock
->fl_type
= F_WRLCK
;
846 if (!numLock
&& !numUnlock
) {
847 /* if no lock or unlock then nothing
848 to do since we do not know what it is */
855 if (lockType
& LOCKING_ANDX_SHARED_LOCK
)
856 posix_lock_type
= CIFS_RDLCK
;
858 posix_lock_type
= CIFS_WRLCK
;
861 posix_lock_type
= CIFS_UNLCK
;
863 rc
= CIFSSMBPosixLock(xid
, tcon
, netfid
, 0 /* set */,
865 posix_lock_type
, wait_flag
);
867 struct cifsFileInfo
*fid
= file
->private_data
;
870 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
872 0, numLock
, lockType
, wait_flag
);
875 /* For Windows locks we must store them. */
876 rc
= store_file_lock(fid
, length
,
877 pfLock
->fl_start
, lockType
);
879 } else if (numUnlock
) {
880 /* For each stored lock that this unlock overlaps
881 completely, unlock it. */
883 struct cifsLockInfo
*li
, *tmp
;
886 mutex_lock(&fid
->lock_mutex
);
887 list_for_each_entry_safe(li
, tmp
, &fid
->llist
, llist
) {
888 if (pfLock
->fl_start
<= li
->offset
&&
889 (pfLock
->fl_start
+ length
) >=
890 (li
->offset
+ li
->length
)) {
891 stored_rc
= CIFSSMBLock(xid
, tcon
,
893 li
->length
, li
->offset
,
894 1, 0, li
->type
, false);
898 list_del(&li
->llist
);
903 mutex_unlock(&fid
->lock_mutex
);
907 if (pfLock
->fl_flags
& FL_POSIX
)
908 posix_lock_file_wait(file
, pfLock
);
914 * Set the timeout on write requests past EOF. For some servers (Windows)
915 * these calls can be very long.
917 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
918 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
919 * The 10M cutoff is totally arbitrary. A better scheme for this would be
920 * welcome if someone wants to suggest one.
922 * We may be able to do a better job with this if there were some way to
923 * declare that a file should be sparse.
926 cifs_write_timeout(struct cifsInodeInfo
*cifsi
, loff_t offset
)
928 if (offset
<= cifsi
->server_eof
)
930 else if (offset
> (cifsi
->server_eof
+ (10 * 1024 * 1024)))
931 return CIFS_VLONG_OP
;
936 /* update the file size (if needed) after a write */
938 cifs_update_eof(struct cifsInodeInfo
*cifsi
, loff_t offset
,
939 unsigned int bytes_written
)
941 loff_t end_of_write
= offset
+ bytes_written
;
943 if (end_of_write
> cifsi
->server_eof
)
944 cifsi
->server_eof
= end_of_write
;
947 ssize_t
cifs_user_write(struct file
*file
, const char __user
*write_data
,
948 size_t write_size
, loff_t
*poffset
)
951 unsigned int bytes_written
= 0;
952 unsigned int total_written
;
953 struct cifs_sb_info
*cifs_sb
;
954 struct cifsTconInfo
*pTcon
;
956 struct cifsFileInfo
*open_file
;
957 struct cifsInodeInfo
*cifsi
= CIFS_I(file
->f_path
.dentry
->d_inode
);
959 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
961 pTcon
= cifs_sb
->tcon
;
963 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
964 *poffset, file->f_path.dentry->d_name.name); */
966 if (file
->private_data
== NULL
)
968 open_file
= file
->private_data
;
970 rc
= generic_write_checks(file
, poffset
, &write_size
, 0);
976 long_op
= cifs_write_timeout(cifsi
, *poffset
);
977 for (total_written
= 0; write_size
> total_written
;
978 total_written
+= bytes_written
) {
980 while (rc
== -EAGAIN
) {
981 if (file
->private_data
== NULL
) {
982 /* file has been closed on us */
984 /* if we have gotten here we have written some data
985 and blocked, and the file has been freed on us while
986 we blocked so return what we managed to write */
987 return total_written
;
989 if (open_file
->closePend
) {
992 return total_written
;
996 if (open_file
->invalidHandle
) {
997 /* we could deadlock if we called
998 filemap_fdatawait from here so tell
999 reopen_file not to flush data to server
1001 rc
= cifs_reopen_file(file
, false);
1006 rc
= CIFSSMBWrite(xid
, pTcon
,
1008 min_t(const int, cifs_sb
->wsize
,
1009 write_size
- total_written
),
1010 *poffset
, &bytes_written
,
1011 NULL
, write_data
+ total_written
, long_op
);
1013 if (rc
|| (bytes_written
== 0)) {
1021 cifs_update_eof(cifsi
, *poffset
, bytes_written
);
1022 *poffset
+= bytes_written
;
1024 long_op
= CIFS_STD_OP
; /* subsequent writes fast -
1025 15 seconds is plenty */
1028 cifs_stats_bytes_written(pTcon
, total_written
);
1030 /* since the write may have blocked check these pointers again */
1031 if ((file
->f_path
.dentry
) && (file
->f_path
.dentry
->d_inode
)) {
1032 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1033 /* Do not update local mtime - server will set its actual value on write
1034 * inode->i_ctime = inode->i_mtime =
1035 * current_fs_time(inode->i_sb);*/
1036 if (total_written
> 0) {
1037 spin_lock(&inode
->i_lock
);
1038 if (*poffset
> file
->f_path
.dentry
->d_inode
->i_size
)
1039 i_size_write(file
->f_path
.dentry
->d_inode
,
1041 spin_unlock(&inode
->i_lock
);
1043 mark_inode_dirty_sync(file
->f_path
.dentry
->d_inode
);
1046 return total_written
;
1049 static ssize_t
cifs_write(struct file
*file
, const char *write_data
,
1050 size_t write_size
, loff_t
*poffset
)
1053 unsigned int bytes_written
= 0;
1054 unsigned int total_written
;
1055 struct cifs_sb_info
*cifs_sb
;
1056 struct cifsTconInfo
*pTcon
;
1058 struct cifsFileInfo
*open_file
;
1059 struct cifsInodeInfo
*cifsi
= CIFS_I(file
->f_path
.dentry
->d_inode
);
1061 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1063 pTcon
= cifs_sb
->tcon
;
1065 cFYI(1, "write %zd bytes to offset %lld of %s", write_size
,
1066 *poffset
, file
->f_path
.dentry
->d_name
.name
);
1068 if (file
->private_data
== NULL
)
1070 open_file
= file
->private_data
;
1074 long_op
= cifs_write_timeout(cifsi
, *poffset
);
1075 for (total_written
= 0; write_size
> total_written
;
1076 total_written
+= bytes_written
) {
1078 while (rc
== -EAGAIN
) {
1079 if (file
->private_data
== NULL
) {
1080 /* file has been closed on us */
1082 /* if we have gotten here we have written some data
1083 and blocked, and the file has been freed on us
1084 while we blocked so return what we managed to
1086 return total_written
;
1088 if (open_file
->closePend
) {
1091 return total_written
;
1095 if (open_file
->invalidHandle
) {
1096 /* we could deadlock if we called
1097 filemap_fdatawait from here so tell
1098 reopen_file not to flush data to
1100 rc
= cifs_reopen_file(file
, false);
1104 if (experimEnabled
|| (pTcon
->ses
->server
&&
1105 ((pTcon
->ses
->server
->secMode
&
1106 (SECMODE_SIGN_REQUIRED
| SECMODE_SIGN_ENABLED
))
1111 len
= min((size_t)cifs_sb
->wsize
,
1112 write_size
- total_written
);
1113 /* iov[0] is reserved for smb header */
1114 iov
[1].iov_base
= (char *)write_data
+
1116 iov
[1].iov_len
= len
;
1117 rc
= CIFSSMBWrite2(xid
, pTcon
,
1118 open_file
->netfid
, len
,
1119 *poffset
, &bytes_written
,
1122 rc
= CIFSSMBWrite(xid
, pTcon
,
1124 min_t(const int, cifs_sb
->wsize
,
1125 write_size
- total_written
),
1126 *poffset
, &bytes_written
,
1127 write_data
+ total_written
,
1130 if (rc
|| (bytes_written
== 0)) {
1138 cifs_update_eof(cifsi
, *poffset
, bytes_written
);
1139 *poffset
+= bytes_written
;
1141 long_op
= CIFS_STD_OP
; /* subsequent writes fast -
1142 15 seconds is plenty */
1145 cifs_stats_bytes_written(pTcon
, total_written
);
1147 /* since the write may have blocked check these pointers again */
1148 if ((file
->f_path
.dentry
) && (file
->f_path
.dentry
->d_inode
)) {
1149 /*BB We could make this contingent on superblock ATIME flag too */
1150 /* file->f_path.dentry->d_inode->i_ctime =
1151 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1152 if (total_written
> 0) {
1153 spin_lock(&file
->f_path
.dentry
->d_inode
->i_lock
);
1154 if (*poffset
> file
->f_path
.dentry
->d_inode
->i_size
)
1155 i_size_write(file
->f_path
.dentry
->d_inode
,
1157 spin_unlock(&file
->f_path
.dentry
->d_inode
->i_lock
);
1159 mark_inode_dirty_sync(file
->f_path
.dentry
->d_inode
);
1162 return total_written
;
1165 #ifdef CONFIG_CIFS_EXPERIMENTAL
1166 struct cifsFileInfo
*find_readable_file(struct cifsInodeInfo
*cifs_inode
)
1168 struct cifsFileInfo
*open_file
= NULL
;
1170 read_lock(&GlobalSMBSeslock
);
1171 /* we could simply get the first_list_entry since write-only entries
1172 are always at the end of the list but since the first entry might
1173 have a close pending, we go through the whole list */
1174 list_for_each_entry(open_file
, &cifs_inode
->openFileList
, flist
) {
1175 if (open_file
->closePend
)
1177 if (open_file
->pfile
&& ((open_file
->pfile
->f_flags
& O_RDWR
) ||
1178 (open_file
->pfile
->f_flags
& O_RDONLY
))) {
1179 if (!open_file
->invalidHandle
) {
1180 /* found a good file */
1181 /* lock it so it will not be closed on us */
1182 cifsFileInfo_get(open_file
);
1183 read_unlock(&GlobalSMBSeslock
);
1185 } /* else might as well continue, and look for
1186 another, or simply have the caller reopen it
1187 again rather than trying to fix this handle */
1188 } else /* write only file */
1189 break; /* write only files are last so must be done */
1191 read_unlock(&GlobalSMBSeslock
);
1196 struct cifsFileInfo
*find_writable_file(struct cifsInodeInfo
*cifs_inode
)
1198 struct cifsFileInfo
*open_file
;
1199 bool any_available
= false;
1202 /* Having a null inode here (because mapping->host was set to zero by
1203 the VFS or MM) should not happen but we had reports of on oops (due to
1204 it being zero) during stress testcases so we need to check for it */
1206 if (cifs_inode
== NULL
) {
1207 cERROR(1, "Null inode passed to cifs_writeable_file");
1212 read_lock(&GlobalSMBSeslock
);
1214 list_for_each_entry(open_file
, &cifs_inode
->openFileList
, flist
) {
1215 if (open_file
->closePend
||
1216 (!any_available
&& open_file
->pid
!= current
->tgid
))
1219 if (open_file
->pfile
&&
1220 ((open_file
->pfile
->f_flags
& O_RDWR
) ||
1221 (open_file
->pfile
->f_flags
& O_WRONLY
))) {
1222 cifsFileInfo_get(open_file
);
1224 if (!open_file
->invalidHandle
) {
1225 /* found a good writable file */
1226 read_unlock(&GlobalSMBSeslock
);
1230 read_unlock(&GlobalSMBSeslock
);
1231 /* Had to unlock since following call can block */
1232 rc
= cifs_reopen_file(open_file
->pfile
, false);
1234 if (!open_file
->closePend
)
1236 else { /* start over in case this was deleted */
1237 /* since the list could be modified */
1238 read_lock(&GlobalSMBSeslock
);
1239 cifsFileInfo_put(open_file
);
1240 goto refind_writable
;
1244 /* if it fails, try another handle if possible -
1245 (we can not do this if closePending since
1246 loop could be modified - in which case we
1247 have to start at the beginning of the list
1248 again. Note that it would be bad
1249 to hold up writepages here (rather than
1250 in caller) with continuous retries */
1251 cFYI(1, "wp failed on reopen file");
1252 read_lock(&GlobalSMBSeslock
);
1253 /* can not use this handle, no write
1254 pending on this one after all */
1255 cifsFileInfo_put(open_file
);
1257 if (open_file
->closePend
) /* list could have changed */
1258 goto refind_writable
;
1259 /* else we simply continue to the next entry. Thus
1260 we do not loop on reopen errors. If we
1261 can not reopen the file, for example if we
1262 reconnected to a server with another client
1263 racing to delete or lock the file we would not
1264 make progress if we restarted before the beginning
1265 of the loop here. */
1268 /* couldn't find useable FH with same pid, try any available */
1269 if (!any_available
) {
1270 any_available
= true;
1271 goto refind_writable
;
1273 read_unlock(&GlobalSMBSeslock
);
1277 static int cifs_partialpagewrite(struct page
*page
, unsigned from
, unsigned to
)
1279 struct address_space
*mapping
= page
->mapping
;
1280 loff_t offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
1283 int bytes_written
= 0;
1284 struct cifs_sb_info
*cifs_sb
;
1285 struct cifsTconInfo
*pTcon
;
1286 struct inode
*inode
;
1287 struct cifsFileInfo
*open_file
;
1289 if (!mapping
|| !mapping
->host
)
1292 inode
= page
->mapping
->host
;
1293 cifs_sb
= CIFS_SB(inode
->i_sb
);
1294 pTcon
= cifs_sb
->tcon
;
1296 offset
+= (loff_t
)from
;
1297 write_data
= kmap(page
);
1300 if ((to
> PAGE_CACHE_SIZE
) || (from
> to
)) {
1305 /* racing with truncate? */
1306 if (offset
> mapping
->host
->i_size
) {
1308 return 0; /* don't care */
1311 /* check to make sure that we are not extending the file */
1312 if (mapping
->host
->i_size
- offset
< (loff_t
)to
)
1313 to
= (unsigned)(mapping
->host
->i_size
- offset
);
1315 open_file
= find_writable_file(CIFS_I(mapping
->host
));
1317 bytes_written
= cifs_write(open_file
->pfile
, write_data
,
1319 cifsFileInfo_put(open_file
);
1320 /* Does mm or vfs already set times? */
1321 inode
->i_atime
= inode
->i_mtime
= current_fs_time(inode
->i_sb
);
1322 if ((bytes_written
> 0) && (offset
))
1324 else if (bytes_written
< 0)
1327 cFYI(1, "No writeable filehandles for inode");
1335 static int cifs_writepages(struct address_space
*mapping
,
1336 struct writeback_control
*wbc
)
1338 struct backing_dev_info
*bdi
= mapping
->backing_dev_info
;
1339 unsigned int bytes_to_write
;
1340 unsigned int bytes_written
;
1341 struct cifs_sb_info
*cifs_sb
;
1345 int range_whole
= 0;
1352 struct cifsFileInfo
*open_file
;
1353 struct cifsInodeInfo
*cifsi
= CIFS_I(mapping
->host
);
1355 struct pagevec pvec
;
1360 cifs_sb
= CIFS_SB(mapping
->host
->i_sb
);
1363 * If wsize is smaller that the page cache size, default to writing
1364 * one page at a time via cifs_writepage
1366 if (cifs_sb
->wsize
< PAGE_CACHE_SIZE
)
1367 return generic_writepages(mapping
, wbc
);
1369 if ((cifs_sb
->tcon
->ses
) && (cifs_sb
->tcon
->ses
->server
))
1370 if (cifs_sb
->tcon
->ses
->server
->secMode
&
1371 (SECMODE_SIGN_REQUIRED
| SECMODE_SIGN_ENABLED
))
1372 if (!experimEnabled
)
1373 return generic_writepages(mapping
, wbc
);
1375 iov
= kmalloc(32 * sizeof(struct kvec
), GFP_KERNEL
);
1377 return generic_writepages(mapping
, wbc
);
1381 * BB: Is this meaningful for a non-block-device file system?
1382 * If it is, we should test it again after we do I/O
1384 if (wbc
->nonblocking
&& bdi_write_congested(bdi
)) {
1385 wbc
->encountered_congestion
= 1;
1392 pagevec_init(&pvec
, 0);
1393 if (wbc
->range_cyclic
) {
1394 index
= mapping
->writeback_index
; /* Start from prev offset */
1397 index
= wbc
->range_start
>> PAGE_CACHE_SHIFT
;
1398 end
= wbc
->range_end
>> PAGE_CACHE_SHIFT
;
1399 if (wbc
->range_start
== 0 && wbc
->range_end
== LLONG_MAX
)
1404 while (!done
&& (index
<= end
) &&
1405 (nr_pages
= pagevec_lookup_tag(&pvec
, mapping
, &index
,
1406 PAGECACHE_TAG_DIRTY
,
1407 min(end
- index
, (pgoff_t
)PAGEVEC_SIZE
- 1) + 1))) {
1416 for (i
= 0; i
< nr_pages
; i
++) {
1417 page
= pvec
.pages
[i
];
1419 * At this point we hold neither mapping->tree_lock nor
1420 * lock on the page itself: the page may be truncated or
1421 * invalidated (changing page->mapping to NULL), or even
1422 * swizzled back from swapper_space to tmpfs file
1428 else if (!trylock_page(page
))
1431 if (unlikely(page
->mapping
!= mapping
)) {
1436 if (!wbc
->range_cyclic
&& page
->index
> end
) {
1442 if (next
&& (page
->index
!= next
)) {
1443 /* Not next consecutive page */
1448 if (wbc
->sync_mode
!= WB_SYNC_NONE
)
1449 wait_on_page_writeback(page
);
1451 if (PageWriteback(page
) ||
1452 !clear_page_dirty_for_io(page
)) {
1458 * This actually clears the dirty bit in the radix tree.
1459 * See cifs_writepage() for more commentary.
1461 set_page_writeback(page
);
1463 if (page_offset(page
) >= mapping
->host
->i_size
) {
1466 end_page_writeback(page
);
1471 * BB can we get rid of this? pages are held by pvec
1473 page_cache_get(page
);
1475 len
= min(mapping
->host
->i_size
- page_offset(page
),
1476 (loff_t
)PAGE_CACHE_SIZE
);
1478 /* reserve iov[0] for the smb header */
1480 iov
[n_iov
].iov_base
= kmap(page
);
1481 iov
[n_iov
].iov_len
= len
;
1482 bytes_to_write
+= len
;
1486 offset
= page_offset(page
);
1488 next
= page
->index
+ 1;
1489 if (bytes_to_write
+ PAGE_CACHE_SIZE
> cifs_sb
->wsize
)
1493 /* Search for a writable handle every time we call
1494 * CIFSSMBWrite2. We can't rely on the last handle
1495 * we used to still be valid
1497 open_file
= find_writable_file(CIFS_I(mapping
->host
));
1499 cERROR(1, "No writable handles for inode");
1502 long_op
= cifs_write_timeout(cifsi
, offset
);
1503 rc
= CIFSSMBWrite2(xid
, cifs_sb
->tcon
,
1505 bytes_to_write
, offset
,
1506 &bytes_written
, iov
, n_iov
,
1508 cifsFileInfo_put(open_file
);
1509 cifs_update_eof(cifsi
, offset
, bytes_written
);
1511 if (rc
|| bytes_written
< bytes_to_write
) {
1512 cERROR(1, "Write2 ret %d, wrote %d",
1514 /* BB what if continued retry is
1515 requested via mount flags? */
1517 set_bit(AS_ENOSPC
, &mapping
->flags
);
1519 set_bit(AS_EIO
, &mapping
->flags
);
1521 cifs_stats_bytes_written(cifs_sb
->tcon
,
1525 for (i
= 0; i
< n_iov
; i
++) {
1526 page
= pvec
.pages
[first
+ i
];
1527 /* Should we also set page error on
1528 success rc but too little data written? */
1529 /* BB investigate retry logic on temporary
1530 server crash cases and how recovery works
1531 when page marked as error */
1536 end_page_writeback(page
);
1537 page_cache_release(page
);
1539 if ((wbc
->nr_to_write
-= n_iov
) <= 0)
1543 /* Need to re-find the pages we skipped */
1544 index
= pvec
.pages
[0]->index
+ 1;
1546 pagevec_release(&pvec
);
1548 if (!scanned
&& !done
) {
1550 * We hit the last page and there is more work to be done: wrap
1551 * back to the start of the file
1557 if (wbc
->range_cyclic
|| (range_whole
&& wbc
->nr_to_write
> 0))
1558 mapping
->writeback_index
= index
;
1565 static int cifs_writepage(struct page
*page
, struct writeback_control
*wbc
)
1571 /* BB add check for wbc flags */
1572 page_cache_get(page
);
1573 if (!PageUptodate(page
))
1574 cFYI(1, "ppw - page not up to date");
1577 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1579 * A writepage() implementation always needs to do either this,
1580 * or re-dirty the page with "redirty_page_for_writepage()" in
1581 * the case of a failure.
1583 * Just unlocking the page will cause the radix tree tag-bits
1584 * to fail to update with the state of the page correctly.
1586 set_page_writeback(page
);
1587 rc
= cifs_partialpagewrite(page
, 0, PAGE_CACHE_SIZE
);
1588 SetPageUptodate(page
); /* BB add check for error and Clearuptodate? */
1590 end_page_writeback(page
);
1591 page_cache_release(page
);
1596 static int cifs_write_end(struct file
*file
, struct address_space
*mapping
,
1597 loff_t pos
, unsigned len
, unsigned copied
,
1598 struct page
*page
, void *fsdata
)
1601 struct inode
*inode
= mapping
->host
;
1603 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1606 if (PageChecked(page
)) {
1608 SetPageUptodate(page
);
1609 ClearPageChecked(page
);
1610 } else if (!PageUptodate(page
) && copied
== PAGE_CACHE_SIZE
)
1611 SetPageUptodate(page
);
1613 if (!PageUptodate(page
)) {
1615 unsigned offset
= pos
& (PAGE_CACHE_SIZE
- 1);
1619 /* this is probably better than directly calling
1620 partialpage_write since in this function the file handle is
1621 known which we might as well leverage */
1622 /* BB check if anything else missing out of ppw
1623 such as updating last write time */
1624 page_data
= kmap(page
);
1625 rc
= cifs_write(file
, page_data
+ offset
, copied
, &pos
);
1626 /* if (rc < 0) should we set writebehind rc? */
1633 set_page_dirty(page
);
1637 spin_lock(&inode
->i_lock
);
1638 if (pos
> inode
->i_size
)
1639 i_size_write(inode
, pos
);
1640 spin_unlock(&inode
->i_lock
);
1644 page_cache_release(page
);
1649 int cifs_fsync(struct file
*file
, int datasync
)
1653 struct cifsTconInfo
*tcon
;
1654 struct cifsFileInfo
*smbfile
= file
->private_data
;
1655 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1659 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1660 file
->f_path
.dentry
->d_name
.name
, datasync
);
1662 rc
= filemap_write_and_wait(inode
->i_mapping
);
1664 rc
= CIFS_I(inode
)->write_behind_rc
;
1665 CIFS_I(inode
)->write_behind_rc
= 0;
1666 tcon
= CIFS_SB(inode
->i_sb
)->tcon
;
1667 if (!rc
&& tcon
&& smbfile
&&
1668 !(CIFS_SB(inode
->i_sb
)->mnt_cifs_flags
& CIFS_MOUNT_NOSSYNC
))
1669 rc
= CIFSSMBFlush(xid
, tcon
, smbfile
->netfid
);
/* static void cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, "sync page %p", page);
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);

#if 0
	if (rc < 0)
		return rc;
	return 0;
#endif
} */
1700 * As file closes, flush all cached write data for this inode checking
1701 * for write behind errors.
1703 int cifs_flush(struct file
*file
, fl_owner_t id
)
1705 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1708 /* Rather than do the steps manually:
1709 lock the inode for writing
1710 loop through pages looking for write behind data (dirty pages)
1711 coalesce into contiguous 16K (or smaller) chunks to write to server
1712 send to server (prefer in parallel)
1713 deal with writebehind errors
1714 unlock inode for writing
1715 filemapfdatawrite appears easier for the time being */
1717 rc
= filemap_fdatawrite(inode
->i_mapping
);
1718 /* reset wb rc if we were able to write out dirty pages */
1720 rc
= CIFS_I(inode
)->write_behind_rc
;
1721 CIFS_I(inode
)->write_behind_rc
= 0;
1724 cFYI(1, "Flush inode %p file %p rc %d", inode
, file
, rc
);
1729 ssize_t
cifs_user_read(struct file
*file
, char __user
*read_data
,
1730 size_t read_size
, loff_t
*poffset
)
1733 unsigned int bytes_read
= 0;
1734 unsigned int total_read
= 0;
1735 unsigned int current_read_size
;
1736 struct cifs_sb_info
*cifs_sb
;
1737 struct cifsTconInfo
*pTcon
;
1739 struct cifsFileInfo
*open_file
;
1740 char *smb_read_data
;
1741 char __user
*current_offset
;
1742 struct smb_com_read_rsp
*pSMBr
;
1745 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1746 pTcon
= cifs_sb
->tcon
;
1748 if (file
->private_data
== NULL
) {
1753 open_file
= file
->private_data
;
1755 if ((file
->f_flags
& O_ACCMODE
) == O_WRONLY
)
1756 cFYI(1, "attempting read on write only file instance");
1758 for (total_read
= 0, current_offset
= read_data
;
1759 read_size
> total_read
;
1760 total_read
+= bytes_read
, current_offset
+= bytes_read
) {
1761 current_read_size
= min_t(const int, read_size
- total_read
,
1764 smb_read_data
= NULL
;
1765 while (rc
== -EAGAIN
) {
1766 int buf_type
= CIFS_NO_BUFFER
;
1767 if ((open_file
->invalidHandle
) &&
1768 (!open_file
->closePend
)) {
1769 rc
= cifs_reopen_file(file
, true);
1773 rc
= CIFSSMBRead(xid
, pTcon
,
1775 current_read_size
, *poffset
,
1776 &bytes_read
, &smb_read_data
,
1778 pSMBr
= (struct smb_com_read_rsp
*)smb_read_data
;
1779 if (smb_read_data
) {
1780 if (copy_to_user(current_offset
,
1782 4 /* RFC1001 length field */ +
1783 le16_to_cpu(pSMBr
->DataOffset
),
1787 if (buf_type
== CIFS_SMALL_BUFFER
)
1788 cifs_small_buf_release(smb_read_data
);
1789 else if (buf_type
== CIFS_LARGE_BUFFER
)
1790 cifs_buf_release(smb_read_data
);
1791 smb_read_data
= NULL
;
1794 if (rc
|| (bytes_read
== 0)) {
1802 cifs_stats_bytes_read(pTcon
, bytes_read
);
1803 *poffset
+= bytes_read
;
1811 static ssize_t
cifs_read(struct file
*file
, char *read_data
, size_t read_size
,
1815 unsigned int bytes_read
= 0;
1816 unsigned int total_read
;
1817 unsigned int current_read_size
;
1818 struct cifs_sb_info
*cifs_sb
;
1819 struct cifsTconInfo
*pTcon
;
1821 char *current_offset
;
1822 struct cifsFileInfo
*open_file
;
1823 int buf_type
= CIFS_NO_BUFFER
;
1826 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1827 pTcon
= cifs_sb
->tcon
;
1829 if (file
->private_data
== NULL
) {
1834 open_file
= file
->private_data
;
1836 if ((file
->f_flags
& O_ACCMODE
) == O_WRONLY
)
1837 cFYI(1, "attempting read on write only file instance");
1839 for (total_read
= 0, current_offset
= read_data
;
1840 read_size
> total_read
;
1841 total_read
+= bytes_read
, current_offset
+= bytes_read
) {
1842 current_read_size
= min_t(const int, read_size
- total_read
,
1844 /* For windows me and 9x we do not want to request more
1845 than it negotiated since it will refuse the read then */
1847 !(pTcon
->ses
->capabilities
& CAP_LARGE_FILES
)) {
1848 current_read_size
= min_t(const int, current_read_size
,
1849 pTcon
->ses
->server
->maxBuf
- 128);
1852 while (rc
== -EAGAIN
) {
1853 if ((open_file
->invalidHandle
) &&
1854 (!open_file
->closePend
)) {
1855 rc
= cifs_reopen_file(file
, true);
1859 rc
= CIFSSMBRead(xid
, pTcon
,
1861 current_read_size
, *poffset
,
1862 &bytes_read
, ¤t_offset
,
1865 if (rc
|| (bytes_read
== 0)) {
1873 cifs_stats_bytes_read(pTcon
, total_read
);
1874 *poffset
+= bytes_read
;
/*
 * mmap: revalidate cached data against the server first, then fall
 * through to the generic page-cache-backed mmap.
 */
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}
1898 static void cifs_copy_cache_pages(struct address_space
*mapping
,
1899 struct list_head
*pages
, int bytes_read
, char *data
)
1904 while (bytes_read
> 0) {
1905 if (list_empty(pages
))
1908 page
= list_entry(pages
->prev
, struct page
, lru
);
1909 list_del(&page
->lru
);
1911 if (add_to_page_cache_lru(page
, mapping
, page
->index
,
1913 page_cache_release(page
);
1914 cFYI(1, "Add page cache failed");
1915 data
+= PAGE_CACHE_SIZE
;
1916 bytes_read
-= PAGE_CACHE_SIZE
;
1919 page_cache_release(page
);
1921 target
= kmap_atomic(page
, KM_USER0
);
1923 if (PAGE_CACHE_SIZE
> bytes_read
) {
1924 memcpy(target
, data
, bytes_read
);
1925 /* zero the tail end of this partial page */
1926 memset(target
+ bytes_read
, 0,
1927 PAGE_CACHE_SIZE
- bytes_read
);
1930 memcpy(target
, data
, PAGE_CACHE_SIZE
);
1931 bytes_read
-= PAGE_CACHE_SIZE
;
1933 kunmap_atomic(target
, KM_USER0
);
1935 flush_dcache_page(page
);
1936 SetPageUptodate(page
);
1938 data
+= PAGE_CACHE_SIZE
;
1940 /* add page to FS-Cache */
1941 cifs_readpage_to_fscache(mapping
->host
, page
);
1946 static int cifs_readpages(struct file
*file
, struct address_space
*mapping
,
1947 struct list_head
*page_list
, unsigned num_pages
)
1953 struct cifs_sb_info
*cifs_sb
;
1954 struct cifsTconInfo
*pTcon
;
1955 unsigned int bytes_read
= 0;
1956 unsigned int read_size
, i
;
1957 char *smb_read_data
= NULL
;
1958 struct smb_com_read_rsp
*pSMBr
;
1959 struct cifsFileInfo
*open_file
;
1960 int buf_type
= CIFS_NO_BUFFER
;
1963 if (file
->private_data
== NULL
) {
1968 open_file
= file
->private_data
;
1969 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1970 pTcon
= cifs_sb
->tcon
;
1973 * Reads as many pages as possible from fscache. Returns -ENOBUFS
1974 * immediately if the cookie is negative
1976 rc
= cifs_readpages_from_fscache(mapping
->host
, mapping
, page_list
,
1981 cFYI(DBG2
, "rpages: num pages %d", num_pages
);
1982 for (i
= 0; i
< num_pages
; ) {
1983 unsigned contig_pages
;
1984 struct page
*tmp_page
;
1985 unsigned long expected_index
;
1987 if (list_empty(page_list
))
1990 page
= list_entry(page_list
->prev
, struct page
, lru
);
1991 offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
1993 /* count adjacent pages that we will read into */
1996 list_entry(page_list
->prev
, struct page
, lru
)->index
;
1997 list_for_each_entry_reverse(tmp_page
, page_list
, lru
) {
1998 if (tmp_page
->index
== expected_index
) {
2004 if (contig_pages
+ i
> num_pages
)
2005 contig_pages
= num_pages
- i
;
2007 /* for reads over a certain size could initiate async
2010 read_size
= contig_pages
* PAGE_CACHE_SIZE
;
2011 /* Read size needs to be in multiples of one page */
2012 read_size
= min_t(const unsigned int, read_size
,
2013 cifs_sb
->rsize
& PAGE_CACHE_MASK
);
2014 cFYI(DBG2
, "rpages: read size 0x%x contiguous pages %d",
2015 read_size
, contig_pages
);
2017 while (rc
== -EAGAIN
) {
2018 if ((open_file
->invalidHandle
) &&
2019 (!open_file
->closePend
)) {
2020 rc
= cifs_reopen_file(file
, true);
2025 rc
= CIFSSMBRead(xid
, pTcon
,
2028 &bytes_read
, &smb_read_data
,
2030 /* BB more RC checks ? */
2031 if (rc
== -EAGAIN
) {
2032 if (smb_read_data
) {
2033 if (buf_type
== CIFS_SMALL_BUFFER
)
2034 cifs_small_buf_release(smb_read_data
);
2035 else if (buf_type
== CIFS_LARGE_BUFFER
)
2036 cifs_buf_release(smb_read_data
);
2037 smb_read_data
= NULL
;
2041 if ((rc
< 0) || (smb_read_data
== NULL
)) {
2042 cFYI(1, "Read error in readpages: %d", rc
);
2044 } else if (bytes_read
> 0) {
2045 task_io_account_read(bytes_read
);
2046 pSMBr
= (struct smb_com_read_rsp
*)smb_read_data
;
2047 cifs_copy_cache_pages(mapping
, page_list
, bytes_read
,
2048 smb_read_data
+ 4 /* RFC1001 hdr */ +
2049 le16_to_cpu(pSMBr
->DataOffset
));
2051 i
+= bytes_read
>> PAGE_CACHE_SHIFT
;
2052 cifs_stats_bytes_read(pTcon
, bytes_read
);
2053 if ((bytes_read
& PAGE_CACHE_MASK
) != bytes_read
) {
2054 i
++; /* account for partial page */
2056 /* server copy of file can have smaller size
2058 /* BB do we need to verify this common case ?
2059 this case is ok - if we are at server EOF
2060 we will hit it on next read */
2065 cFYI(1, "No bytes read (%d) at offset %lld . "
2066 "Cleaning remaining pages from readahead list",
2067 bytes_read
, offset
);
2068 /* BB turn off caching and do new lookup on
2069 file size at server? */
2072 if (smb_read_data
) {
2073 if (buf_type
== CIFS_SMALL_BUFFER
)
2074 cifs_small_buf_release(smb_read_data
);
2075 else if (buf_type
== CIFS_LARGE_BUFFER
)
2076 cifs_buf_release(smb_read_data
);
2077 smb_read_data
= NULL
;
2082 /* need to free smb_read_data buf before exit */
2083 if (smb_read_data
) {
2084 if (buf_type
== CIFS_SMALL_BUFFER
)
2085 cifs_small_buf_release(smb_read_data
);
2086 else if (buf_type
== CIFS_LARGE_BUFFER
)
2087 cifs_buf_release(smb_read_data
);
2088 smb_read_data
= NULL
;
2096 static int cifs_readpage_worker(struct file
*file
, struct page
*page
,
2102 /* Is the page cached? */
2103 rc
= cifs_readpage_from_fscache(file
->f_path
.dentry
->d_inode
, page
);
2107 page_cache_get(page
);
2108 read_data
= kmap(page
);
2109 /* for reads over a certain size could initiate async read ahead */
2111 rc
= cifs_read(file
, read_data
, PAGE_CACHE_SIZE
, poffset
);
2116 cFYI(1, "Bytes read %d", rc
);
2118 file
->f_path
.dentry
->d_inode
->i_atime
=
2119 current_fs_time(file
->f_path
.dentry
->d_inode
->i_sb
);
2121 if (PAGE_CACHE_SIZE
> rc
)
2122 memset(read_data
+ rc
, 0, PAGE_CACHE_SIZE
- rc
);
2124 flush_dcache_page(page
);
2125 SetPageUptodate(page
);
2127 /* send this page to the cache */
2128 cifs_readpage_to_fscache(file
->f_path
.dentry
->d_inode
, page
);
2134 page_cache_release(page
);
2140 static int cifs_readpage(struct file
*file
, struct page
*page
)
2142 loff_t offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
2148 if (file
->private_data
== NULL
) {
2154 cFYI(1, "readpage %p at offset %d 0x%x\n",
2155 page
, (int)offset
, (int)offset
);
2157 rc
= cifs_readpage_worker(file
, page
, &offset
);
2165 static int is_inode_writable(struct cifsInodeInfo
*cifs_inode
)
2167 struct cifsFileInfo
*open_file
;
2169 read_lock(&GlobalSMBSeslock
);
2170 list_for_each_entry(open_file
, &cifs_inode
->openFileList
, flist
) {
2171 if (open_file
->closePend
)
2173 if (open_file
->pfile
&&
2174 ((open_file
->pfile
->f_flags
& O_RDWR
) ||
2175 (open_file
->pfile
->f_flags
& O_WRONLY
))) {
2176 read_unlock(&GlobalSMBSeslock
);
2180 read_unlock(&GlobalSMBSeslock
);
2184 /* We do not want to update the file size from server for inodes
2185 open for write - to avoid races with writepage extending
2186 the file - in the future we could consider allowing
2187 refreshing the inode only on increases in the file size
2188 but this is tricky to do without racing with writebehind
2189 page caching in the current Linux kernel design */
2190 bool is_size_safe_to_change(struct cifsInodeInfo
*cifsInode
, __u64 end_of_file
)
2195 if (is_inode_writable(cifsInode
)) {
2196 /* This inode is open for write at least once */
2197 struct cifs_sb_info
*cifs_sb
;
2199 cifs_sb
= CIFS_SB(cifsInode
->vfs_inode
.i_sb
);
2200 if (cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_DIRECT_IO
) {
2201 /* since no page cache to corrupt on directio
2202 we can change size safely */
2206 if (i_size_read(&cifsInode
->vfs_inode
) < end_of_file
)
2214 static int cifs_write_begin(struct file
*file
, struct address_space
*mapping
,
2215 loff_t pos
, unsigned len
, unsigned flags
,
2216 struct page
**pagep
, void **fsdata
)
2218 pgoff_t index
= pos
>> PAGE_CACHE_SHIFT
;
2219 loff_t offset
= pos
& (PAGE_CACHE_SIZE
- 1);
2220 loff_t page_start
= pos
& PAGE_MASK
;
2225 cFYI(1, "write_begin from %lld len %d", (long long)pos
, len
);
2227 page
= grab_cache_page_write_begin(mapping
, index
, flags
);
2233 if (PageUptodate(page
))
2237 * If we write a full page it will be up to date, no need to read from
2238 * the server. If the write is short, we'll end up doing a sync write
2241 if (len
== PAGE_CACHE_SIZE
)
2245 * optimize away the read when we have an oplock, and we're not
2246 * expecting to use any of the data we'd be reading in. That
2247 * is, when the page lies beyond the EOF, or straddles the EOF
2248 * and the write will cover all of the existing data.
2250 if (CIFS_I(mapping
->host
)->clientCanCacheRead
) {
2251 i_size
= i_size_read(mapping
->host
);
2252 if (page_start
>= i_size
||
2253 (offset
== 0 && (pos
+ len
) >= i_size
)) {
2254 zero_user_segments(page
, 0, offset
,
2258 * PageChecked means that the parts of the page
2259 * to which we're not writing are considered up
2260 * to date. Once the data is copied to the
2261 * page, it can be set uptodate.
2263 SetPageChecked(page
);
2268 if ((file
->f_flags
& O_ACCMODE
) != O_WRONLY
) {
2270 * might as well read a page, it is fast enough. If we get
2271 * an error, we don't need to return it. cifs_write_end will
2272 * do a sync write instead since PG_uptodate isn't set.
2274 cifs_readpage_worker(file
, page
, &page_start
);
2276 /* we could try using another file handle if there is one -
2277 but how would we lock it to prevent close of that handle
2278 racing with this read? In any case
2279 this will be written out by write_end so is fine */
2286 static int cifs_release_page(struct page
*page
, gfp_t gfp
)
2288 if (PagePrivate(page
))
2291 return cifs_fscache_release_page(page
, gfp
);
2294 static void cifs_invalidate_page(struct page
*page
, unsigned long offset
)
2296 struct cifsInodeInfo
*cifsi
= CIFS_I(page
->mapping
->host
);
2299 cifs_fscache_invalidate_page(page
, &cifsi
->vfs_inode
);
2302 void cifs_oplock_break(struct work_struct
*work
)
2304 struct cifsFileInfo
*cfile
= container_of(work
, struct cifsFileInfo
,
2306 struct inode
*inode
= cfile
->pInode
;
2307 struct cifsInodeInfo
*cinode
= CIFS_I(inode
);
2308 struct cifs_sb_info
*cifs_sb
= CIFS_SB(cfile
->mnt
->mnt_sb
);
2311 if (inode
&& S_ISREG(inode
->i_mode
)) {
2312 if (cinode
->clientCanCacheRead
)
2313 break_lease(inode
, O_RDONLY
);
2315 break_lease(inode
, O_WRONLY
);
2316 rc
= filemap_fdatawrite(inode
->i_mapping
);
2317 if (cinode
->clientCanCacheRead
== 0) {
2318 waitrc
= filemap_fdatawait(inode
->i_mapping
);
2319 invalidate_remote_inode(inode
);
2324 cinode
->write_behind_rc
= rc
;
2325 cFYI(1, "Oplock flush inode %p rc %d", inode
, rc
);
2329 * releasing stale oplock after recent reconnect of smb session using
2330 * a now incorrect file handle is not a data integrity issue but do
2331 * not bother sending an oplock release if session to server still is
2332 * disconnected since oplock already released by the server
2334 if (!cfile
->closePend
&& !cfile
->oplock_break_cancelled
) {
2335 rc
= CIFSSMBLock(0, cifs_sb
->tcon
, cfile
->netfid
, 0, 0, 0, 0,
2336 LOCKING_ANDX_OPLOCK_RELEASE
, false);
2337 cFYI(1, "Oplock release rc = %d", rc
);
2341 * We might have kicked in before is_valid_oplock_break()
2342 * finished grabbing reference for us. Make sure it's done by
2343 * waiting for GlobalSMSSeslock.
2345 write_lock(&GlobalSMBSeslock
);
2346 write_unlock(&GlobalSMBSeslock
);
2348 cifs_oplock_break_put(cfile
);
2351 void cifs_oplock_break_get(struct cifsFileInfo
*cfile
)
2354 cifsFileInfo_get(cfile
);
2357 void cifs_oplock_break_put(struct cifsFileInfo
*cfile
)
2360 cifsFileInfo_put(cfile
);
2363 const struct address_space_operations cifs_addr_ops
= {
2364 .readpage
= cifs_readpage
,
2365 .readpages
= cifs_readpages
,
2366 .writepage
= cifs_writepage
,
2367 .writepages
= cifs_writepages
,
2368 .write_begin
= cifs_write_begin
,
2369 .write_end
= cifs_write_end
,
2370 .set_page_dirty
= __set_page_dirty_nobuffers
,
2371 .releasepage
= cifs_release_page
,
2372 .invalidatepage
= cifs_invalidate_page
,
2373 /* .sync_page = cifs_sync_page, */
2378 * cifs_readpages requires the server to support a buffer large enough to
2379 * contain the header plus one complete page of data. Otherwise, we need
2380 * to leave cifs_readpages out of the address space operations.
2382 const struct address_space_operations cifs_addr_ops_smallbuf
= {
2383 .readpage
= cifs_readpage
,
2384 .writepage
= cifs_writepage
,
2385 .writepages
= cifs_writepages
,
2386 .write_begin
= cifs_write_begin
,
2387 .write_end
= cifs_write_end
,
2388 .set_page_dirty
= __set_page_dirty_nobuffers
,
2389 .releasepage
= cifs_release_page
,
2390 .invalidatepage
= cifs_invalidate_page
,
2391 /* .sync_page = cifs_sync_page, */