/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
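
/*
 * Map the flags from a POSIX open(2) call onto the desired access
 * bits carried in an SMB NT-style open request.
 */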
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request
                   can cause unnecessary access denied on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}
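
/*
 * Map POSIX open(2) flags onto the SMB_O_* flags used by the POSIX
 * extensions of the CIFS protocol.
 */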
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}
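
/*
 * Choose the SMB create disposition matching the combination of
 * O_CREAT, O_EXCL and O_TRUNC in the open flags (see the mapping
 * table in cifs_nt_open below).
 */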
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
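
/*
 * Open a file using the POSIX extensions of the CIFS protocol and,
 * if the caller asked for it, instantiate or refresh the matching
 * inode from the attributes the server returns.
 */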
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}
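
/*
 * Open a file the traditional NT way and refresh the inode from the
 * metadata returned by the server.
 */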
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
             __u16 *pnetfid, int xid)
{
        int rc;
        int desiredAccess;
        int disposition;
        FILE_ALL_INFO *buf;

        desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is not a direct match between disposition
 *      FILE_SUPERSEDE (ie create whether or not file exists although
 *      O_CREAT | O_TRUNC is similar but truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (tcon->ses->capabilities & CAP_NT_SMBS)
                rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
                         desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
        else
                rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
                        desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, pnetfid);

out:
        kfree(buf);
        return rc;
}
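
/*
 * Allocate and initialize the private data for a newly opened file and
 * link it into the per-tcon and per-inode lists of open file handles.
 */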
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
        struct cifsFileInfo *pCifsFile;

        pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (pCifsFile == NULL)
                return pCifsFile;

        pCifsFile->count = 1;
        pCifsFile->netfid = fileHandle;
        pCifsFile->pid = current->tgid;
        pCifsFile->uid = current_fsuid();
        pCifsFile->dentry = dget(dentry);
        pCifsFile->f_flags = file->f_flags;
        pCifsFile->invalidHandle = false;
        pCifsFile->tlink = cifs_get_tlink(tlink);
        mutex_init(&pCifsFile->fh_mutex);
        mutex_init(&pCifsFile->lock_mutex);
        INIT_LIST_HEAD(&pCifsFile->llist);
        INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

        spin_lock(&cifs_file_list_lock);
        list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
        /* if readable file instance put first in list*/
        if (file->f_mode & FMODE_READ)
                list_add(&pCifsFile->flist, &pCifsInode->openFileList);
        else
                list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        cifs_set_oplock_level(pCifsInode, oplock);

        file->private_data = pCifsFile;
        return pCifsFile;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                        cifs_file->dentry->d_inode);

                /* in strict cache mode we need invalidate mapping on the last
                   close because it may cause a error when we open this file
                   again and get at least level II oplock */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;

                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                int xid, rc;

                xid = GetXid();
                rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
                FreeXid(xid);
        }

        /* Delete any outstanding lock records. We'll lose them when the file
           is closed anyway. */
        mutex_lock(&cifs_file->lock_mutex);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
                list_del(&li->llist);
                kfree(li);
        }
        mutex_unlock(&cifs_file->lock_mutex);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *pCifsFile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        __u16 netfid;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                FreeXid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            (tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /* else fallthrough to retry open the old way on network i/o
                   or DFS errors */
        }

        if (!posix_open_ok) {
                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &netfid, xid);
                if (rc)
                        goto out;
        }

        pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
        if (pCifsFile == NULL) {
                CIFSSMBClose(xid, tcon, netfid);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /* time to set mode which we can not set earlier due to
                   problems creating new read-only files */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
                                       pCifsFile->pid);
        }

out:
        kfree(full_path);
        FreeXid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

/* BB list all locks open on this file and relock */

        return rc;
}
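
/*
 * Reopen a file handle that was invalidated, typically after the session
 * to the server was lost and reconnected. When can_flush is set, cached
 * write data is pushed to the server and the inode refreshed first.
 */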
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
        int rc = -EACCES;
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        __u16 netfid;

        xid = GetXid();
        mutex_lock(&pCifsFile->fh_mutex);
        if (!pCifsFile->invalidHandle) {
                mutex_unlock(&pCifsFile->fh_mutex);
                rc = 0;
                FreeXid(xid);
                return rc;
        }

        inode = pCifsFile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(pCifsFile->tlink);

        /* can not grab rename sem here because various ops, including
           those that already have the rename sem can end up causing writepage
           to get called and if the server was down that means we end up here,
           and we can never tell if the caller already has the rename_sem */
        full_path = build_path_from_dentry(pCifsFile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&pCifsFile->fh_mutex);
                FreeXid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s",
                 inode, pCifsFile->f_flags, full_path);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = pCifsFile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /* fallthrough to retry open the old way on errors, especially
                   in the reconnect path it is important to retry hard */
        }

        desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

        /* Can not refresh inode by passing in file_info buf to be returned
           by SMBOpen and then calling get_inode_info with returned buf
           since file might have write behind data that needs to be flushed
           and server version of file size can be stale. If we knew for sure
           that inode was not dirty locally we could do this */

        rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
                         CREATE_NOT_DIR, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                mutex_unlock(&pCifsFile->fh_mutex);
                cFYI(1, "cifs_open returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        pCifsFile->netfid = netfid;
        pCifsFile->invalidHandle = false;
        mutex_unlock(&pCifsFile->fh_mutex);
        pCifsInode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode,
                                full_path, inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode,
                                full_path, NULL, inode->i_sb,
                                xid, NULL);
        } /* else we are writing out data to server already
             and could deadlock if we tried to flush data, and
             since we do not know if we have data that would
             invalidate the current end of file on the server
             we can not go to the server to get the new inode
             info */

        cifs_set_oplock_level(pCifsInode, oplock);

        cifs_relock_file(pCifsFile);

reopen_error_exit:
        kfree(full_path);
        FreeXid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid;
        struct cifsFileInfo *pCFileStruct = file->private_data;
        char *ptmp;

        cFYI(1, "Closedir inode = 0x%p", inode);

        xid = GetXid();

        if (pCFileStruct) {
                struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

                cFYI(1, "Freeing private data in close dir");
                spin_lock(&cifs_file_list_lock);
                if (!pCFileStruct->srch_inf.endOfSearch &&
                    !pCFileStruct->invalidHandle) {
                        pCFileStruct->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
                        cFYI(1, "Closing uncompleted readdir with rc %d",
                                 rc);
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                } else
                        spin_unlock(&cifs_file_list_lock);
                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
                if (ptmp) {
                        cFYI(1, "closedir free smb buf in srch struct");
                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
                        if (pCFileStruct->srch_inf.smallBuf)
                                cifs_small_buf_release(ptmp);
                        else
                                cifs_buf_release(ptmp);
                }
                cifs_put_tlink(pCFileStruct->tlink);
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        FreeXid(xid);
        return rc;
}
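
/*
 * Remember a byte-range lock taken on this file handle so it can be
 * found and released later.
 */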
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
                           __u64 offset, __u8 lockType)
{
        struct cifsLockInfo *li =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (li == NULL)
                return -ENOMEM;
        li->offset = offset;
        li->length = len;
        li->type = lockType;
        mutex_lock(&fid->lock_mutex);
        list_add(&li->llist, &fid->llist);
        mutex_unlock(&fid->lock_mutex);
        return 0;
}
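
/*
 * Handle fcntl/flock lock requests from the VFS, using POSIX byte-range
 * locks when the server supports the Unix extensions and Windows-style
 * LOCKING_ANDX requests otherwise.
 */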
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
        int rc, xid;
        __u32 numLock = 0;
        __u32 numUnlock = 0;
        __u64 length;
        bool wait_flag = false;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        __u16 netfid;
        __u8 lockType = LOCKING_ANDX_LARGE_FILES;
        bool posix_locking = 0;

        length = 1 + pfLock->fl_end - pfLock->fl_start;
        rc = -EACCES;
        xid = GetXid();

        cFYI(1, "Lock parm: 0x%x flockflags: "
                 "0x%x flocktype: 0x%x start: %lld end: %lld",
                cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
                pfLock->fl_end);

        if (pfLock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
        if (pfLock->fl_flags & FL_FLOCK)
                cFYI(1, "Flock");
        if (pfLock->fl_flags & FL_SLEEP) {
                cFYI(1, "Blocking lock");
                wait_flag = true;
        }
        if (pfLock->fl_flags & FL_ACCESS)
                cFYI(1, "Process suspended by mandatory locking - "
                         "not implemented yet");
        if (pfLock->fl_flags & FL_LEASE)
                cFYI(1, "Lease on file - not implemented yet");
        if (pfLock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

        if (pfLock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK");
                numLock = 1;
        } else if (pfLock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
                numUnlock = 1;
                /* Check if unlock includes more than
                   one lock range */
        } else if (pfLock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else if (pfLock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
                numLock = 1;
        } else if (pfLock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else
                cFYI(1, "Unknown type of lock");

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
        netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

        if ((tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                posix_locking = 1;
        /* BB add code here to normalize offset and length to
           account for negative length which we can not accept over the
           wire */
        if (IS_GETLK(cmd)) {
                if (posix_locking) {
                        int posix_lock_type;
                        if (lockType & LOCKING_ANDX_SHARED_LOCK)
                                posix_lock_type = CIFS_RDLCK;
                        else
                                posix_lock_type = CIFS_WRLCK;
                        rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
                                        length, pfLock, posix_lock_type,
                                        wait_flag);
                        FreeXid(xid);
                        return rc;
                }

                /* BB we could chain these into one lock request BB */
                rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
                                 0, 1, lockType, 0 /* wait flag */, 0);
                if (rc == 0) {
                        rc = CIFSSMBLock(xid, tcon, netfid, length,
                                         pfLock->fl_start, 1 /* numUnlock */ ,
                                         0 /* numLock */ , lockType,
                                         0 /* wait flag */, 0);
                        pfLock->fl_type = F_UNLCK;
                        if (rc != 0)
                                cERROR(1, "Error unlocking previously locked "
                                           "range %d during test of lock", rc);
                        rc = 0;
                } else {
                        /* if rc == ERR_SHARING_VIOLATION ? */
                        rc = 0;

                        if (lockType & LOCKING_ANDX_SHARED_LOCK) {
                                pfLock->fl_type = F_WRLCK;
                        } else {
                                rc = CIFSSMBLock(xid, tcon, netfid, length,
                                        pfLock->fl_start, 0, 1,
                                        lockType | LOCKING_ANDX_SHARED_LOCK,
                                        0 /* wait flag */, 0);
                                if (rc == 0) {
                                        rc = CIFSSMBLock(xid, tcon, netfid,
                                                length, pfLock->fl_start, 1, 0,
                                                lockType |
                                                LOCKING_ANDX_SHARED_LOCK,
                                                0 /* wait flag */, 0);
                                        pfLock->fl_type = F_RDLCK;
                                        if (rc != 0)
                                                cERROR(1, "Error unlocking "
                                                "previously locked range %d "
                                                "during test of lock", rc);
                                        rc = 0;
                                } else {
                                        pfLock->fl_type = F_WRLCK;
                                        rc = 0;
                                }
                        }
                }

                FreeXid(xid);
                return rc;
        }

        if (!numLock && !numUnlock) {
                /* if no lock or unlock then nothing
                   to do since we do not know what it is */
                FreeXid(xid);
                return -EOPNOTSUPP;
        }

        if (posix_locking) {
                int posix_lock_type;
                if (lockType & LOCKING_ANDX_SHARED_LOCK)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;

                if (numUnlock == 1)
                        posix_lock_type = CIFS_UNLCK;

                rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
                                      length, pfLock, posix_lock_type,
                                      wait_flag);
        } else {
                struct cifsFileInfo *fid = file->private_data;

                if (numLock) {
                        rc = CIFSSMBLock(xid, tcon, netfid, length,
                                         pfLock->fl_start, 0, numLock, lockType,
                                         wait_flag, 0);

                        if (rc == 0) {
                                /* For Windows locks we must store them. */
                                rc = store_file_lock(fid, length,
                                                pfLock->fl_start, lockType);
                        }
                } else if (numUnlock) {
                        /* For each stored lock that this unlock overlaps
                           completely, unlock it. */
                        int stored_rc = 0;
                        struct cifsLockInfo *li, *tmp;

                        rc = 0;
                        mutex_lock(&fid->lock_mutex);
                        list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
                                if (pfLock->fl_start <= li->offset &&
                                                (pfLock->fl_start + length) >=
                                                (li->offset + li->length)) {
                                        stored_rc = CIFSSMBLock(xid, tcon,
                                                        netfid, li->length,
                                                        li->offset, 1, 0,
                                                        li->type, false, 0);
                                        if (stored_rc)
                                                rc = stored_rc;
                                        else {
                                                list_del(&li->llist);
                                                kfree(li);
                                        }
                                }
                        }
                        mutex_unlock(&fid->lock_mutex);
                }
        }

        if (pfLock->fl_flags & FL_POSIX)
                posix_lock_file_wait(file, pfLock);
        FreeXid(xid);
        return rc;
}

/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
                unsigned int bytes_written)
{
        loff_t end_of_write = offset + bytes_written;

        if (end_of_write > cifsi->server_eof)
                cifsi->server_eof = end_of_write;
}
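
/*
 * Write data from a kernel buffer to the server at the given offset,
 * retrying after reconnects, and update the cached file size.
 */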
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
                          const char *write_data, size_t write_size,
                          loff_t *poffset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *pTcon;
        int xid;
        struct dentry *dentry = open_file->dentry;
        struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
        struct cifs_io_parms io_parms;

        cifs_sb = CIFS_SB(dentry->d_sb);

        cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
           *poffset, dentry->d_name.name);

        pTcon = tlink_tcon(open_file->tlink);

        xid = GetXid();

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        struct kvec iov[2];
                        unsigned int len;

                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to
                                   server now */
                                rc = cifs_reopen_file(open_file, false);
                                if (rc != 0)
                                        break;
                        }

                        len = min((size_t)cifs_sb->wsize,
                                  write_size - total_written);
                        /* iov[0] is reserved for smb header */
                        iov[1].iov_base = (char *)write_data + total_written;
                        iov[1].iov_len = len;
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = *poffset;
                        io_parms.length = len;
                        rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
                                           1, 0);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                FreeXid(xid);
                                return rc;
                        }
                } else {
                        cifs_update_eof(cifsi, *poffset, bytes_written);
                        *poffset += bytes_written;
                }
        }

        cifs_stats_bytes_written(pTcon, total_written);

        if (total_written > 0) {
                spin_lock(&dentry->d_inode->i_lock);
                if (*poffset > dentry->d_inode->i_size)
                        i_size_write(dentry->d_inode, *poffset);
                spin_unlock(&dentry->d_inode->i_lock);
        }
        mark_inode_dirty_sync(dentry->d_inode);
        FreeXid(xid);
        return total_written;
}
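
/*
 * Find an open file instance on this inode through which we can read;
 * takes a reference on the returned handle.
 */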
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
{
        struct cifsFileInfo *open_file = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;

        spin_lock(&cifs_file_list_lock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
                        if (!open_file->invalidHandle) {
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get(open_file);
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
                             again rather than trying to fix this handle */
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
        spin_unlock(&cifs_file_list_lock);
        return NULL;
}
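
/*
 * Find an open file instance on this inode suitable for writing,
 * reopening an invalidated handle if necessary; takes a reference on
 * the returned handle.
 */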
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
{
        struct cifsFileInfo *open_file;
        struct cifs_sb_info *cifs_sb;
        bool any_available = false;
        int rc;

        /* Having a null inode here (because mapping->host was set to zero by
           the VFS or MM) should not happen but we had reports of on oops (due to
           it being zero) during stress testcases so we need to check for it */

        if (cifs_inode == NULL) {
                cERROR(1, "Null inode passed to cifs_writeable_file");
                dump_stack();
                return NULL;
        }

        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;

        spin_lock(&cifs_file_list_lock);
refind_writable:
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (!any_available && open_file->pid != current->tgid)
                        continue;
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        cifsFileInfo_get(open_file);

                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
                        }

                        spin_unlock(&cifs_file_list_lock);

                        /* Had to unlock since following call can block */
                        rc = cifs_reopen_file(open_file, false);
                        if (!rc)
                                return open_file;

                        /* if it fails, try another handle if possible */
                        cFYI(1, "wp failed on reopen file");
                        cifsFileInfo_put(open_file);

                        spin_lock(&cifs_file_list_lock);

                        /* else we simply continue to the next entry. Thus
                           we do not loop on reopen errors. If we
                           can not reopen the file, for example if we
                           reconnected to a server with another client
                           racing to delete or lock the file we would not
                           make progress if we restarted before the beginning
                           of the loop here. */
                }
        }
        /* couldn't find useable FH with same pid, try any available */
        if (!any_available) {
                any_available = true;
                goto refind_writable;
        }
        spin_unlock(&cifs_file_list_lock);
        return NULL;
}
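
/*
 * Write out the dirty region of a page ([from, to)) using any writable
 * handle we can find for the inode.
 */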
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
        struct address_space *mapping = page->mapping;
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        char *write_data;
        int rc = -EFAULT;
        int bytes_written = 0;
        struct inode *inode;
        struct cifsFileInfo *open_file;

        if (!mapping || !mapping->host)
                return -EFAULT;

        inode = page->mapping->host;

        offset += (loff_t)from;
        write_data = kmap(page);
        write_data += from;

        if ((to > PAGE_CACHE_SIZE) || (from > to)) {
                kunmap(page);
                return -EIO;
        }

        /* racing with truncate? */
        if (offset > mapping->host->i_size) {
                kunmap(page);
                return 0; /* don't care */
        }

        /* check to make sure that we are not extending the file */
        if (mapping->host->i_size - offset < (loff_t)to)
                to = (unsigned)(mapping->host->i_size - offset);

        open_file = find_writable_file(CIFS_I(mapping->host), false);
        if (open_file) {
                bytes_written = cifs_write(open_file, open_file->pid,
                                           write_data, to - from, &offset);
                cifsFileInfo_put(open_file);
                /* Does mm or vfs already set times? */
                inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
                if ((bytes_written > 0) && (offset))
                        rc = 0;
                else if (bytes_written < 0)
                        rc = bytes_written;
        } else {
                cFYI(1, "No writeable filehandles for inode");
                rc = -EIO;
        }

        kunmap(page);
        return rc;
}
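
/*
 * Gather runs of contiguous dirty pages and send them to the server as
 * large asynchronous writes.
 */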
static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
        bool done = false, scanned = false, range_whole = false;
        pgoff_t end, index;
        struct cifs_writedata *wdata;
        struct page *page;
        int rc = 0;

        /*
         * If wsize is smaller than the page cache size, default to writing
         * one page at a time via cifs_writepage
         */
        if (cifs_sb->wsize < PAGE_CACHE_SIZE)
                return generic_writepages(mapping, wbc);

        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = true;
                scanned = true;
        }
retry:
        while (!done && index <= end) {
                unsigned int i, nr_pages, found_pages;
                pgoff_t next = 0, tofind;
                struct page **pages;

                tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
                                end - index) + 1;

                wdata = cifs_writedata_alloc((unsigned int)tofind);
                if (!wdata) {
                        rc = -ENOMEM;
                        break;
                }

                /*
                 * find_get_pages_tag seems to return a max of 256 on each
                 * iteration, so we must call it several times in order to
                 * fill the array or the wsize is effectively limited to
                 * 256 * PAGE_CACHE_SIZE.
                 */
                found_pages = 0;
                pages = wdata->pages;
                do {
                        nr_pages = find_get_pages_tag(mapping, &index,
                                                        PAGECACHE_TAG_DIRTY,
                                                        tofind, pages);
                        found_pages += nr_pages;
                        tofind -= nr_pages;
                        pages += nr_pages;
                } while (nr_pages && tofind && index <= end);

                if (found_pages == 0) {
                        kref_put(&wdata->refcount, cifs_writedata_release);
                        break;
                }

                nr_pages = 0;
                for (i = 0; i < found_pages; i++) {
                        page = wdata->pages[i];
                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */

                        if (nr_pages == 0)
                                lock_page(page);
                        else if (!trylock_page(page))
                                break;

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                break;
                        }

                        if (!wbc->range_cyclic && page->index > end) {
                                done = true;
                                unlock_page(page);
                                break;
                        }

                        if (next && (page->index != next)) {
                                /* Not next consecutive page */
                                unlock_page(page);
                                break;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                                        !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                break;
                        }

                        /*
                         * This actually clears the dirty bit in the radix tree.
                         * See cifs_writepage() for more commentary.
                         */
                        set_page_writeback(page);

                        if (page_offset(page) >= mapping->host->i_size) {
                                done = true;
                                unlock_page(page);
                                end_page_writeback(page);
                                break;
                        }

                        wdata->pages[i] = page;
                        next = page->index + 1;
                        ++nr_pages;
                }

                /* reset index to refind any pages skipped */
                if (nr_pages == 0)
                        index = wdata->pages[0]->index + 1;

                /* put any pages we aren't going to use */
                for (i = nr_pages; i < found_pages; i++) {
                        page_cache_release(wdata->pages[i]);
                        wdata->pages[i] = NULL;
                }

                /* nothing to write? */
                if (nr_pages == 0) {
                        kref_put(&wdata->refcount, cifs_writedata_release);
                        continue;
                }

                wdata->sync_mode = wbc->sync_mode;
                wdata->nr_pages = nr_pages;
                wdata->offset = page_offset(wdata->pages[0]);

                do {
                        if (wdata->cfile != NULL)
                                cifsFileInfo_put(wdata->cfile);
                        wdata->cfile = find_writable_file(CIFS_I(mapping->host),
                                                          false);
                        if (!wdata->cfile) {
                                cERROR(1, "No writable handles for inode");
                                rc = -EBADF;
                                break;
                        }
                        rc = cifs_async_writev(wdata);
                } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

                for (i = 0; i < nr_pages; ++i)
                        unlock_page(wdata->pages[i]);

                /* send failure -- clean up the mess */
                if (rc != 0) {
                        for (i = 0; i < nr_pages; ++i) {
                                if (rc == -EAGAIN)
                                        redirty_page_for_writepage(wbc,
                                                           wdata->pages[i]);
                                else
                                        SetPageError(wdata->pages[i]);
                                end_page_writeback(wdata->pages[i]);
                                page_cache_release(wdata->pages[i]);
                        }
                        if (rc != -EAGAIN)
                                mapping_set_error(mapping, rc);
                }
                kref_put(&wdata->refcount, cifs_writedata_release);

                wbc->nr_to_write -= nr_pages;
                if (wbc->nr_to_write <= 0)
                        done = true;

                index = next;
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = true;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;

        return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        int rc;
        int xid;

        xid = GetXid();
/* BB add check for wbc flags */
        page_cache_get(page);
        if (!PageUptodate(page))
                cFYI(1, "ppw - page not up to date");

        /*
         * Set the "writeback" flag, and clear "dirty" in the radix tree.
         *
         * A writepage() implementation always needs to do either this,
         * or re-dirty the page with "redirty_page_for_writepage()" in
         * the case of a failure.
         *
         * Just unlocking the page will cause the radix tree tag-bits
         * to fail to update with the state of the page correctly.
         */
        set_page_writeback(page);
retry_write:
        rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
        if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
                goto retry_write;
        else if (rc == -EAGAIN)
                redirty_page_for_writepage(wbc, page);
        else if (rc != 0)
                SetPageError(page);
        else
                SetPageUptodate(page);
        end_page_writeback(page);
        page_cache_release(page);
        FreeXid(xid);
        return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
        int rc = cifs_writepage_locked(page, wbc);
        unlock_page(page);
        return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int rc;
        struct inode *inode = mapping->host;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        __u32 pid;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = cfile->pid;
        else
                pid = current->tgid;

        cFYI(1, "write_end for page %p from pos %lld with %d bytes",
                 page, pos, copied);

        if (PageChecked(page)) {
                if (copied == len)
                        SetPageUptodate(page);
                ClearPageChecked(page);
        } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
                SetPageUptodate(page);

        if (!PageUptodate(page)) {
                char *page_data;
                unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
                int xid;

                xid = GetXid();
                /* this is probably better than directly calling
                   partialpage_write since in this function the file handle is
                   known which we might as well leverage */
                /* BB check if anything else missing out of ppw
                   such as updating last write time */
                page_data = kmap(page);
                rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
                /* if (rc < 0) should we set writebehind rc? */
                kunmap(page);

                FreeXid(xid);
        } else {
                rc = copied;
                pos += copied;
                set_page_dirty(page);
        }

        if (rc > 0) {
                spin_lock(&inode->i_lock);
                if (pos > inode->i_size)
                        i_size_write(inode, pos);
                spin_unlock(&inode->i_lock);
        }

        unlock_page(page);
        page_cache_release(page);

        return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        int xid;
        int rc = 0;
        struct cifs_tcon *tcon;
        struct cifsFileInfo *smbfile = file->private_data;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

        rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (rc)
                return rc;
        mutex_lock(&inode->i_mutex);

        xid = GetXid();

        cFYI(1, "Sync file - name: %s datasync: 0x%x",
                file->f_path.dentry->d_name.name, datasync);

        if (!CIFS_I(inode)->clientCanCacheRead) {
                rc = cifs_invalidate_mapping(inode);
                if (rc) {
                        cFYI(1, "rc: %d during invalidate phase", rc);
                        rc = 0; /* don't care about it in fsync */
                }
        }

        tcon = tlink_tcon(smbfile->tlink);
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
                rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

        FreeXid(xid);
        mutex_unlock(&inode->i_mutex);
        return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        int xid;
        int rc = 0;
        struct cifs_tcon *tcon;
        struct cifsFileInfo *smbfile = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        struct inode *inode = file->f_mapping->host;

        rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (rc)
                return rc;
        mutex_lock(&inode->i_mutex);

        xid = GetXid();

        cFYI(1, "Sync file - name: %s datasync: 0x%x",
                file->f_path.dentry->d_name.name, datasync);

        tcon = tlink_tcon(smbfile->tlink);
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
                rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

        FreeXid(xid);
        mutex_unlock(&inode->i_mutex);
        return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        int rc = 0;

        if (file->f_mode & FMODE_WRITE)
                rc = filemap_write_and_wait(inode->i_mapping);

        cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

        return rc;
}
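
/*
 * Allocate a scratch page for each chunk of an iovec-based write; on
 * failure, release what was already allocated.
 */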
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
        int rc = 0;
        unsigned long i;

        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_page(__GFP_HIGHMEM);
                if (!pages[i]) {
                        /*
                         * save number of pages we have already allocated and
                         * return with ENOMEM error
                         */
                        num_pages = i;
                        rc = -ENOMEM;
                        goto error;
                }
        }

        return rc;

error:
        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
        return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
        size_t num_pages;
        size_t clen;

        clen = min_t(const size_t, len, wsize);
        num_pages = clen / PAGE_CACHE_SIZE;
        if (clen % PAGE_CACHE_SIZE)
                num_pages++;

        if (cur_len)
                *cur_len = clen;

        return num_pages;
}
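
/*
 * Copy user data into kernel pages wsize bytes at a time and send each
 * chunk to the server with CIFSSMBWrite2.
 */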
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
                 unsigned long nr_segs, loff_t *poffset)
{
        unsigned int written;
        unsigned long num_pages, npages, i;
        size_t copied, len, cur_len;
        ssize_t total_written = 0;
        struct kvec *to_send;
        struct page **pages;
        struct iov_iter it;
        struct inode *inode;
        struct cifsFileInfo *open_file;
        struct cifs_tcon *pTcon;
        struct cifs_sb_info *cifs_sb;
        struct cifs_io_parms io_parms;
        int xid, rc;
        __u32 pid;

        len = iov_length(iov, nr_segs);
        if (!len)
                return 0;

        rc = generic_write_checks(file, poffset, &len, 0);
        if (rc)
                return rc;

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);

        pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
        if (!to_send) {
                kfree(pages);
                return -ENOMEM;
        }

        rc = cifs_write_allocate_pages(pages, num_pages);
        if (rc) {
                kfree(pages);
                kfree(to_send);
                return rc;
        }

        xid = GetXid();
        open_file = file->private_data;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        pTcon = tlink_tcon(open_file->tlink);
        inode = file->f_path.dentry->d_inode;

        iov_iter_init(&it, iov, nr_segs, len, 0);
        npages = num_pages;

        do {
                size_t save_len = cur_len;
                for (i = 0; i < npages; i++) {
                        copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
                        copied = iov_iter_copy_from_user(pages[i], &it, 0,
                                                         copied);
                        cur_len -= copied;
                        iov_iter_advance(&it, copied);
                        to_send[i+1].iov_base = kmap(pages[i]);
                        to_send[i+1].iov_len = copied;
                }

                cur_len = save_len - cur_len;

                do {
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, false);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = *poffset;
                        io_parms.length = cur_len;
                        rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
                                           npages, 0);
                } while (rc == -EAGAIN);

                for (i = 0; i < npages; i++)
                        kunmap(pages[i]);

                if (written) {
                        len -= written;
                        total_written += written;
                        cifs_update_eof(CIFS_I(inode), *poffset, written);
                        *poffset += written;
                } else if (rc < 0) {
                        if (!total_written)
                                total_written = rc;
                        break;
                }

                /* get length and number of kvecs of the next write */
                npages = get_numpages(cifs_sb->wsize, len, &cur_len);
        } while (len > 0);

        if (total_written > 0) {
                spin_lock(&inode->i_lock);
                if (*poffset > inode->i_size)
                        i_size_write(inode, *poffset);
                spin_unlock(&inode->i_lock);
        }

        cifs_stats_bytes_written(pTcon, total_written);
        mark_inode_dirty_sync(inode);

        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
        kfree(to_send);
        kfree(pages);
        FreeXid(xid);
        return total_written;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
{
        ssize_t written;
        struct inode *inode;

        inode = iocb->ki_filp->f_path.dentry->d_inode;

        /*
         * BB - optimize the way when signing is disabled. We can drop this
         * extra memory-to-memory copying and use iovec buffers for constructing
         * write request.
         */

        written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
        if (written > 0) {
                CIFS_I(inode)->invalid_mapping = true;
                iocb->ki_pos = pos;
        }

        return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
                           unsigned long nr_segs, loff_t pos)
{
        struct inode *inode;

        inode = iocb->ki_filp->f_path.dentry->d_inode;

        if (CIFS_I(inode)->clientCanCacheAll)
                return generic_file_aio_write(iocb, iov, nr_segs, pos);

        /*
         * In strict cache mode we need to write the data to the server exactly
         * from the pos to pos+len-1 rather than flush all affected pages
         * because it may cause a error with mandatory locks on these pages but
         * not on the region from pos to ppos+len-1.
         */

        return cifs_user_writev(iocb, iov, nr_segs, pos);
}
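
/*
 * Read directly from the server into the caller's iovec, bypassing the
 * page cache, rsize bytes at a time.
 */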
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
                 unsigned long nr_segs, loff_t *poffset)
{
        int rc;
        int xid;
        ssize_t total_read;
        unsigned int bytes_read = 0;
        size_t len, cur_len;
        int iov_offset = 0;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *pTcon;
        struct cifsFileInfo *open_file;
        struct smb_com_read_rsp *pSMBr;
        struct cifs_io_parms io_parms;
        char *read_data;
        __u32 pid;

        if (!nr_segs)
                return 0;

        len = iov_length(iov, nr_segs);
        if (!len)
                return 0;

        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

        open_file = file->private_data;
        pTcon = tlink_tcon(open_file->tlink);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");

        for (total_read = 0; total_read < len; total_read += bytes_read) {
                cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
                rc = -EAGAIN;
                read_data = NULL;

                while (rc == -EAGAIN) {
                        int buf_type = CIFS_NO_BUFFER;
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, true);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = *poffset;
                        io_parms.length = cur_len;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &read_data, &buf_type);
                        pSMBr = (struct smb_com_read_rsp *)read_data;
                        if (read_data) {
                                char *data_offset = read_data + 4 +
                                                le16_to_cpu(pSMBr->DataOffset);
                                if (memcpy_toiovecend(iov, data_offset,
                                                      iov_offset, bytes_read))
                                        rc = -EFAULT;
                                if (buf_type == CIFS_SMALL_BUFFER)
                                        cifs_small_buf_release(read_data);
                                else if (buf_type == CIFS_LARGE_BUFFER)
                                        cifs_buf_release(read_data);
                                read_data = NULL;
                                iov_offset += bytes_read;
                        }
                }

                if (rc || (bytes_read == 0)) {
                        if (total_read) {
                                break;
                        } else {
                                FreeXid(xid);
                                return rc;
                        }
                } else {
                        cifs_stats_bytes_read(pTcon, bytes_read);
                        *poffset += bytes_read;
                }
        }

        FreeXid(xid);
        return total_read;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
                        unsigned long nr_segs, loff_t pos)
{
        ssize_t read;

        read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
        if (read > 0)
                iocb->ki_pos = pos;

        return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
                          unsigned long nr_segs, loff_t pos)
{
        struct inode *inode;

        inode = iocb->ki_filp->f_path.dentry->d_inode;

        if (CIFS_I(inode)->clientCanCacheRead)
                return generic_file_aio_read(iocb, iov, nr_segs, pos);

        /*
         * In strict cache mode we need to read from the server all the time
         * if we don't have level II oplock because the server can delay mtime
         * change - so we can't make a decision about inode invalidating.
         * And we can also fail with pagereading if there are mandatory locks
         * on pages affected by this read but not on the region from pos to
         * pos+len-1.
         */

        return cifs_user_readv(iocb, iov, nr_segs, pos);
}
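
/*
 * Synchronous read into a kernel buffer, looping over the requested
 * range in rsize-sized chunks and retrying after reconnects.
 */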
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
                         loff_t *poffset)
{
        int rc = -EACCES;
        unsigned int bytes_read = 0;
        unsigned int total_read;
        unsigned int current_read_size;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *pTcon;
        int xid;
        char *current_offset;
        struct cifsFileInfo *open_file;
        struct cifs_io_parms io_parms;
        int buf_type = CIFS_NO_BUFFER;
        __u32 pid;

        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

        if (file->private_data == NULL) {
                rc = -EBADF;
                FreeXid(xid);
                return rc;
        }
        open_file = file->private_data;
        pTcon = tlink_tcon(open_file->tlink);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");

        for (total_read = 0, current_offset = read_data;
             read_size > total_read;
             total_read += bytes_read, current_offset += bytes_read) {
                current_read_size = min_t(const int, read_size - total_read,
                                          cifs_sb->rsize);
                /* For windows me and 9x we do not want to request more
                   than it negotiated since it will refuse the read then */
                if ((pTcon->ses) &&
                        !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
                        current_read_size = min_t(const int, current_read_size,
                                        pTcon->ses->server->maxBuf - 128);
                }
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, true);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = *poffset;
                        io_parms.length = current_read_size;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &current_offset, &buf_type);
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
                                break;
                        } else {
                                FreeXid(xid);
                                return rc;
                        }
                } else {
                        cifs_stats_bytes_read(pTcon, total_read);
                        *poffset += bytes_read;
                }
        }
        FreeXid(xid);
        return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;

        lock_page(page);
        return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc, xid;
        struct inode *inode = file->f_path.dentry->d_inode;

        xid = GetXid();

        if (!CIFS_I(inode)->clientCanCacheRead) {
                rc = cifs_invalidate_mapping(inode);
                if (rc)
                        return rc;
        }

        rc = generic_file_mmap(file, vma);
        if (rc == 0)
                vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc, xid;

        xid = GetXid();
        rc = cifs_revalidate_file(file);
        if (rc) {
                cFYI(1, "Validation prior to mmap failed, error=%d", rc);
                FreeXid(xid);
                return rc;
        }
        rc = generic_file_mmap(file, vma);
        if (rc == 0)
                vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
}

static void cifs_copy_cache_pages(struct address_space *mapping,
        struct list_head *pages, int bytes_read, char *data)
{
        struct page *page;
        char *target;

        while (bytes_read > 0) {
                if (list_empty(pages))
                        break;

                page = list_entry(pages->prev, struct page, lru);
                list_del(&page->lru);

                if (add_to_page_cache_lru(page, mapping, page->index,
                                          GFP_KERNEL)) {
                        page_cache_release(page);
                        cFYI(1, "Add page cache failed");
                        data += PAGE_CACHE_SIZE;
                        bytes_read -= PAGE_CACHE_SIZE;
                        continue;
                }
                page_cache_release(page);

                target = kmap_atomic(page, KM_USER0);

                if (PAGE_CACHE_SIZE > bytes_read) {
                        memcpy(target, data, bytes_read);
                        /* zero the tail end of this partial page */
                        memset(target + bytes_read, 0,
                               PAGE_CACHE_SIZE - bytes_read);
                        bytes_read = 0;
                } else {
                        memcpy(target, data, PAGE_CACHE_SIZE);
                        bytes_read -= PAGE_CACHE_SIZE;
                }
                kunmap_atomic(target, KM_USER0);

                flush_dcache_page(page);
                SetPageUptodate(page);
                unlock_page(page);
                data += PAGE_CACHE_SIZE;

                /* add page to FS-Cache */
                cifs_readpage_to_fscache(mapping->host, page);
        }
        return;
}
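
/*
 * Fill the readahead list of pages with data read from the server in
 * large, page-aligned chunks.
 */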
static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
{
        int rc = -EACCES;
        int xid;
        loff_t offset;
        struct page *page;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *pTcon;
        unsigned int bytes_read = 0;
        unsigned int read_size, i;
        char *smb_read_data = NULL;
        struct smb_com_read_rsp *pSMBr;
        struct cifsFileInfo *open_file;
        struct cifs_io_parms io_parms;
        int buf_type = CIFS_NO_BUFFER;
        __u32 pid;

        xid = GetXid();
        if (file->private_data == NULL) {
                rc = -EBADF;
                FreeXid(xid);
                return rc;
        }
        open_file = file->private_data;
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = tlink_tcon(open_file->tlink);

        /*
         * Reads as many pages as possible from fscache. Returns -ENOBUFS
         * immediately if the cookie is negative
         */
        rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
                                         &num_pages);
        if (rc == 0)
                goto read_complete;

        cFYI(DBG2, "rpages: num pages %d", num_pages);
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        for (i = 0; i < num_pages; ) {
                unsigned contig_pages;
                struct page *tmp_page;
                unsigned long expected_index;

                if (list_empty(page_list))
                        break;

                page = list_entry(page_list->prev, struct page, lru);
                offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

                /* count adjacent pages that we will read into */
                contig_pages = 0;
                expected_index =
                        list_entry(page_list->prev, struct page, lru)->index;
                list_for_each_entry_reverse(tmp_page, page_list, lru) {
                        if (tmp_page->index == expected_index) {
                                contig_pages++;
                                expected_index++;
                        } else
                                break;
                }
                if (contig_pages + i > num_pages)
                        contig_pages = num_pages - i;

                /* for reads over a certain size could initiate async
                   read ahead */

                read_size = contig_pages * PAGE_CACHE_SIZE;
                /* Read size needs to be in multiples of one page */
                read_size = min_t(const unsigned int, read_size,
                                  cifs_sb->rsize & PAGE_CACHE_MASK);
                cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
                                read_size, contig_pages);
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, true);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = offset;
                        io_parms.length = read_size;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &smb_read_data, &buf_type);
                        /* BB more RC checks ? */
                        if (rc == -EAGAIN) {
                                if (smb_read_data) {
                                        if (buf_type == CIFS_SMALL_BUFFER)
                                                cifs_small_buf_release(
                                                        smb_read_data);
                                        else if (buf_type == CIFS_LARGE_BUFFER)
                                                cifs_buf_release(smb_read_data);
                                        smb_read_data = NULL;
                                }
                        }
                }
                if ((rc < 0) || (smb_read_data == NULL)) {
                        cFYI(1, "Read error in readpages: %d", rc);
                        break;
                } else if (bytes_read > 0) {
                        task_io_account_read(bytes_read);
                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
                        cifs_copy_cache_pages(mapping, page_list, bytes_read,
                                smb_read_data + 4 /* RFC1001 hdr */ +
                                le16_to_cpu(pSMBr->DataOffset));

                        i += bytes_read >> PAGE_CACHE_SHIFT;
                        cifs_stats_bytes_read(pTcon, bytes_read);
                        if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
                                i++; /* account for partial page */

                                /* server copy of file can have smaller size
                                   than client */
                                /* BB do we need to verify this common case ?
                                   this case is ok - if we are at server EOF
                                   we will hit it on next read */
                        }
                } else {
                        cFYI(1, "No bytes read (%d) at offset %lld . "
                                "Cleaning remaining pages from readahead list",
                                bytes_read, offset);
                        /* BB turn off caching and do new lookup on
                           file size at server? */
                        break;
                }
                if (smb_read_data) {
                        if (buf_type == CIFS_SMALL_BUFFER)
                                cifs_small_buf_release(smb_read_data);
                        else if (buf_type == CIFS_LARGE_BUFFER)
                                cifs_buf_release(smb_read_data);
                        smb_read_data = NULL;
                }
                bytes_read = 0;
        }

        /* need to free smb_read_data buf before exit */
        if (smb_read_data) {
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(smb_read_data);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(smb_read_data);
                smb_read_data = NULL;
        }

read_complete:
        FreeXid(xid);
        return rc;
}

static int cifs_readpage_worker(struct file *file, struct page *page,
        loff_t *poffset)
{
        char *read_data;
        int rc;

        /* Is the page cached? */
        rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
        if (rc == 0)
                goto read_complete;

        page_cache_get(page);
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */

        rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

        if (rc < 0)
                goto io_error;
        else
                cFYI(1, "Bytes read %d", rc);

        file->f_path.dentry->d_inode->i_atime =
                current_fs_time(file->f_path.dentry->d_inode->i_sb);

        if (PAGE_CACHE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

        flush_dcache_page(page);
        SetPageUptodate(page);

        /* send this page to the cache */
        cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

        rc = 0;

io_error:
        kunmap(page);
        page_cache_release(page);

read_complete:
        return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int rc = -EACCES;
        int xid;

        xid = GetXid();

        if (file->private_data == NULL) {
                rc = -EBADF;
                FreeXid(xid);
                return rc;
        }

        cFYI(1, "readpage %p at offset %d 0x%x\n",
                 page, (int)offset, (int)offset);

        rc = cifs_readpage_worker(file, page, &offset);

        unlock_page(page);

        FreeXid(xid);
        return rc;
}
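
/*
 * Return whether any open instance of this inode was opened for writing.
 */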
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;

        spin_lock(&cifs_file_list_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        spin_unlock(&cifs_file_list_lock);
                        return 1;
                }
        }
        spin_unlock(&cifs_file_list_lock);
        return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
        if (!cifsInode)
                return true;

        if (is_inode_writable(cifsInode)) {
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;

                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                           we can change size safely */
                        return true;
                }

                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
                        return true;

                return false;
        } else
                return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
        loff_t i_size;
        struct page *page;
        int rc = 0;

        cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page))
                goto out;

        /*
         * If we write a full page it will be up to date, no need to read from
         * the server. If the write is short, we'll end up doing a sync write
         * instead.
         */
        if (len == PAGE_CACHE_SIZE)
                goto out;

        /*
         * optimize away the read when we have an oplock, and we're not
         * expecting to use any of the data we'd be reading in. That
         * is, when the page lies beyond the EOF, or straddles the EOF
         * and the write will cover all of the existing data.
         */
        if (CIFS_I(mapping->host)->clientCanCacheRead) {
                i_size = i_size_read(mapping->host);
                if (page_start >= i_size ||
                    (offset == 0 && (pos + len) >= i_size)) {
                        zero_user_segments(page, 0, offset,
                                           offset + len,
                                           PAGE_CACHE_SIZE);
                        /*
                         * PageChecked means that the parts of the page
                         * to which we're not writing are considered up
                         * to date. Once the data is copied to the
                         * page, it can be set uptodate.
                         */
                        SetPageChecked(page);
                        goto out;
                }
        }

        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
                   racing with this read? In any case
                   this will be written out by write_end so is fine */
        }
out:
        *pagep = page;
        return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
        if (PagePrivate(page))
                return 0;

        return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

        if (offset == 0)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
        int rc = 0;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };

        cFYI(1, "Launder page: %p", page);

        if (clear_page_dirty_for_io(page))
                rc = cifs_writepage_locked(page, &wbc);

        cifs_fscache_invalidate_page(page, page->mapping->host);
        return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
        struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
                                                  oplock_break);
        struct inode *inode = cfile->dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        int rc = 0;

        if (inode && S_ISREG(inode->i_mode)) {
                if (cinode->clientCanCacheRead)
                        break_lease(inode, O_RDONLY);
                else
                        break_lease(inode, O_WRONLY);
                rc = filemap_fdatawrite(inode->i_mapping);
                if (cinode->clientCanCacheRead == 0) {
                        rc = filemap_fdatawait(inode->i_mapping);
                        mapping_set_error(inode->i_mapping, rc);
                        invalidate_remote_inode(inode);
                }
                cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
        }

        /*
         * releasing stale oplock after recent reconnect of smb session using
         * a now incorrect file handle is not a data integrity issue but do
         * not bother sending an oplock release if session to server still is
         * disconnected since oplock already released by the server
         */
        if (!cfile->oplock_break_cancelled) {
                rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
                                 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
                                 cinode->clientCanCacheRead ? 1 : 0);
                cFYI(1, "Oplock release rc = %d", rc);
        }
}

const struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
        .readpage = cifs_readpage,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};