4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2007
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
44 static inline int cifs_convert_flags(unsigned int flags
)
46 if ((flags
& O_ACCMODE
) == O_RDONLY
)
48 else if ((flags
& O_ACCMODE
) == O_WRONLY
)
50 else if ((flags
& O_ACCMODE
) == O_RDWR
) {
51 /* GENERIC_ALL is too much permission to request
52 can cause unnecessary access denied on create */
53 /* return GENERIC_ALL; */
54 return (GENERIC_READ
| GENERIC_WRITE
);
57 return (READ_CONTROL
| FILE_WRITE_ATTRIBUTES
| FILE_READ_ATTRIBUTES
|
58 FILE_WRITE_EA
| FILE_APPEND_DATA
| FILE_WRITE_DATA
|
62 static inline fmode_t
cifs_posix_convert_flags(unsigned int flags
)
64 fmode_t posix_flags
= 0;
66 if ((flags
& O_ACCMODE
) == O_RDONLY
)
67 posix_flags
= FMODE_READ
;
68 else if ((flags
& O_ACCMODE
) == O_WRONLY
)
69 posix_flags
= FMODE_WRITE
;
70 else if ((flags
& O_ACCMODE
) == O_RDWR
) {
71 /* GENERIC_ALL is too much permission to request
72 can cause unnecessary access denied on create */
73 /* return GENERIC_ALL; */
74 posix_flags
= FMODE_READ
| FMODE_WRITE
;
76 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
77 reopening a file. They had their effect on the original open */
79 posix_flags
|= (fmode_t
)O_APPEND
;
81 posix_flags
|= (fmode_t
)O_DSYNC
;
83 posix_flags
|= (fmode_t
)__O_SYNC
;
84 if (flags
& O_DIRECTORY
)
85 posix_flags
|= (fmode_t
)O_DIRECTORY
;
86 if (flags
& O_NOFOLLOW
)
87 posix_flags
|= (fmode_t
)O_NOFOLLOW
;
89 posix_flags
|= (fmode_t
)O_DIRECT
;
94 static inline int cifs_get_disposition(unsigned int flags
)
96 if ((flags
& (O_CREAT
| O_EXCL
)) == (O_CREAT
| O_EXCL
))
98 else if ((flags
& (O_CREAT
| O_TRUNC
)) == (O_CREAT
| O_TRUNC
))
99 return FILE_OVERWRITE_IF
;
100 else if ((flags
& O_CREAT
) == O_CREAT
)
102 else if ((flags
& O_TRUNC
) == O_TRUNC
)
103 return FILE_OVERWRITE
;
108 /* all arguments to this function must be checked for validity in caller */
110 cifs_posix_open_inode_helper(struct inode
*inode
, struct file
*file
,
111 struct cifsInodeInfo
*pCifsInode
,
112 struct cifsFileInfo
*pCifsFile
, __u32 oplock
,
116 write_lock(&GlobalSMBSeslock
);
118 pCifsInode
= CIFS_I(file
->f_path
.dentry
->d_inode
);
119 if (pCifsInode
== NULL
) {
120 write_unlock(&GlobalSMBSeslock
);
124 if (pCifsInode
->clientCanCacheRead
) {
125 /* we have the inode open somewhere else
126 no need to discard cache data */
127 goto psx_client_can_cache
;
130 /* BB FIXME need to fix this check to move it earlier into posix_open
131 BB fIX following section BB FIXME */
133 /* if not oplocked, invalidate inode pages if mtime or file
135 /* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
136 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
137 (file->f_path.dentry->d_inode->i_size ==
138 (loff_t)le64_to_cpu(buf->EndOfFile))) {
139 cFYI(1, ("inode unchanged on server"));
141 if (file->f_path.dentry->d_inode->i_mapping) {
142 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
144 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
146 cFYI(1, ("invalidating remote inode since open detected it "
148 invalidate_remote_inode(file->f_path.dentry->d_inode);
151 psx_client_can_cache
:
152 if ((oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
153 pCifsInode
->clientCanCacheAll
= true;
154 pCifsInode
->clientCanCacheRead
= true;
155 cFYI(1, ("Exclusive Oplock granted on inode %p",
156 file
->f_path
.dentry
->d_inode
));
157 } else if ((oplock
& 0xF) == OPLOCK_READ
)
158 pCifsInode
->clientCanCacheRead
= true;
160 /* will have to change the unlock if we reenable the
161 filemap_fdatawrite (which does not seem necessary */
162 write_unlock(&GlobalSMBSeslock
);
166 static struct cifsFileInfo
*
167 cifs_fill_filedata(struct file
*file
)
169 struct list_head
*tmp
;
170 struct cifsFileInfo
*pCifsFile
= NULL
;
171 struct cifsInodeInfo
*pCifsInode
= NULL
;
173 /* search inode for this file and fill in file->private_data */
174 pCifsInode
= CIFS_I(file
->f_path
.dentry
->d_inode
);
175 read_lock(&GlobalSMBSeslock
);
176 list_for_each(tmp
, &pCifsInode
->openFileList
) {
177 pCifsFile
= list_entry(tmp
, struct cifsFileInfo
, flist
);
178 if ((pCifsFile
->pfile
== NULL
) &&
179 (pCifsFile
->pid
== current
->tgid
)) {
180 /* mode set in cifs_create */
182 /* needed for writepage */
183 pCifsFile
->pfile
= file
;
184 file
->private_data
= pCifsFile
;
188 read_unlock(&GlobalSMBSeslock
);
190 if (file
->private_data
!= NULL
) {
192 } else if ((file
->f_flags
& O_CREAT
) && (file
->f_flags
& O_EXCL
))
193 cERROR(1, ("could not find file instance for "
194 "new file %p", file
));
198 /* all arguments to this function must be checked for validity in caller */
199 static inline int cifs_open_inode_helper(struct inode
*inode
, struct file
*file
,
200 struct cifsInodeInfo
*pCifsInode
, struct cifsFileInfo
*pCifsFile
,
201 struct cifsTconInfo
*pTcon
, int *oplock
, FILE_ALL_INFO
*buf
,
202 char *full_path
, int xid
)
204 struct timespec temp
;
207 if (pCifsInode
->clientCanCacheRead
) {
208 /* we have the inode open somewhere else
209 no need to discard cache data */
210 goto client_can_cache
;
213 /* BB need same check in cifs_create too? */
214 /* if not oplocked, invalidate inode pages if mtime or file
216 temp
= cifs_NTtimeToUnix(buf
->LastWriteTime
);
217 if (timespec_equal(&file
->f_path
.dentry
->d_inode
->i_mtime
, &temp
) &&
218 (file
->f_path
.dentry
->d_inode
->i_size
==
219 (loff_t
)le64_to_cpu(buf
->EndOfFile
))) {
220 cFYI(1, ("inode unchanged on server"));
222 if (file
->f_path
.dentry
->d_inode
->i_mapping
) {
223 /* BB no need to lock inode until after invalidate
224 since namei code should already have it locked? */
225 rc
= filemap_write_and_wait(file
->f_path
.dentry
->d_inode
->i_mapping
);
227 CIFS_I(file
->f_path
.dentry
->d_inode
)->write_behind_rc
= rc
;
229 cFYI(1, ("invalidating remote inode since open detected it "
231 invalidate_remote_inode(file
->f_path
.dentry
->d_inode
);
236 rc
= cifs_get_inode_info_unix(&file
->f_path
.dentry
->d_inode
,
237 full_path
, inode
->i_sb
, xid
);
239 rc
= cifs_get_inode_info(&file
->f_path
.dentry
->d_inode
,
240 full_path
, buf
, inode
->i_sb
, xid
, NULL
);
242 if ((*oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
243 pCifsInode
->clientCanCacheAll
= true;
244 pCifsInode
->clientCanCacheRead
= true;
245 cFYI(1, ("Exclusive Oplock granted on inode %p",
246 file
->f_path
.dentry
->d_inode
));
247 } else if ((*oplock
& 0xF) == OPLOCK_READ
)
248 pCifsInode
->clientCanCacheRead
= true;
253 int cifs_open(struct inode
*inode
, struct file
*file
)
258 struct cifs_sb_info
*cifs_sb
;
259 struct cifsTconInfo
*tcon
;
260 struct cifsFileInfo
*pCifsFile
;
261 struct cifsInodeInfo
*pCifsInode
;
262 char *full_path
= NULL
;
266 FILE_ALL_INFO
*buf
= NULL
;
270 cifs_sb
= CIFS_SB(inode
->i_sb
);
271 tcon
= cifs_sb
->tcon
;
273 pCifsInode
= CIFS_I(file
->f_path
.dentry
->d_inode
);
274 pCifsFile
= cifs_fill_filedata(file
);
281 full_path
= build_path_from_dentry(file
->f_path
.dentry
);
282 if (full_path
== NULL
) {
288 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
289 inode
, file
->f_flags
, full_path
));
296 if (!tcon
->broken_posix_open
&& tcon
->unix_ext
&&
297 (tcon
->ses
->capabilities
& CAP_UNIX
) &&
298 (CIFS_UNIX_POSIX_PATH_OPS_CAP
&
299 le64_to_cpu(tcon
->fsUnixInfo
.Capability
))) {
300 int oflags
= (int) cifs_posix_convert_flags(file
->f_flags
);
301 oflags
|= SMB_O_CREAT
;
302 /* can not refresh inode info since size could be stale */
303 rc
= cifs_posix_open(full_path
, &inode
, file
->f_path
.mnt
,
305 cifs_sb
->mnt_file_mode
/* ignored */,
306 oflags
, &oplock
, &netfid
, xid
);
308 cFYI(1, ("posix open succeeded"));
309 /* no need for special case handling of setting mode
310 on read only files needed here */
312 pCifsFile
= cifs_fill_filedata(file
);
313 cifs_posix_open_inode_helper(inode
, file
, pCifsInode
,
314 pCifsFile
, oplock
, netfid
);
316 } else if ((rc
== -EINVAL
) || (rc
== -EOPNOTSUPP
)) {
317 if (tcon
->ses
->serverNOS
)
318 cERROR(1, ("server %s of type %s returned"
319 " unexpected error on SMB posix open"
320 ", disabling posix open support."
321 " Check if server update available.",
322 tcon
->ses
->serverName
,
323 tcon
->ses
->serverNOS
));
324 tcon
->broken_posix_open
= true;
325 } else if ((rc
!= -EIO
) && (rc
!= -EREMOTE
) &&
326 (rc
!= -EOPNOTSUPP
)) /* path not found or net err */
328 /* else fallthrough to retry open the old way on network i/o
332 desiredAccess
= cifs_convert_flags(file
->f_flags
);
334 /*********************************************************************
335 * open flag mapping table:
337 * POSIX Flag CIFS Disposition
338 * ---------- ----------------
339 * O_CREAT FILE_OPEN_IF
340 * O_CREAT | O_EXCL FILE_CREATE
341 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
342 * O_TRUNC FILE_OVERWRITE
343 * none of the above FILE_OPEN
345 * Note that there is not a direct match between disposition
346 * FILE_SUPERSEDE (ie create whether or not file exists although
347 * O_CREAT | O_TRUNC is similar but truncates the existing
348 * file rather than creating a new file as FILE_SUPERSEDE does
349 * (which uses the attributes / metadata passed in on open call)
351 *? O_SYNC is a reasonable match to CIFS writethrough flag
352 *? and the read write flags match reasonably. O_LARGEFILE
353 *? is irrelevant because largefile support is always used
354 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
355 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
356 *********************************************************************/
358 disposition
= cifs_get_disposition(file
->f_flags
);
360 /* BB pass O_SYNC flag through on file attributes .. BB */
362 /* Also refresh inode by passing in file_info buf returned by SMBOpen
363 and calling get_inode_info with returned buf (at least helps
364 non-Unix server case) */
366 /* BB we can not do this if this is the second open of a file
367 and the first handle has writebehind data, we might be
368 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
369 buf
= kmalloc(sizeof(FILE_ALL_INFO
), GFP_KERNEL
);
375 if (cifs_sb
->tcon
->ses
->capabilities
& CAP_NT_SMBS
)
376 rc
= CIFSSMBOpen(xid
, tcon
, full_path
, disposition
,
377 desiredAccess
, CREATE_NOT_DIR
, &netfid
, &oplock
, buf
,
378 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
379 & CIFS_MOUNT_MAP_SPECIAL_CHR
);
381 rc
= -EIO
; /* no NT SMB support fall into legacy open below */
384 /* Old server, try legacy style OpenX */
385 rc
= SMBLegacyOpen(xid
, tcon
, full_path
, disposition
,
386 desiredAccess
, CREATE_NOT_DIR
, &netfid
, &oplock
, buf
,
387 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
388 & CIFS_MOUNT_MAP_SPECIAL_CHR
);
391 cFYI(1, ("cifs_open returned 0x%x", rc
));
395 pCifsFile
= cifs_new_fileinfo(inode
, netfid
, file
, file
->f_path
.mnt
,
397 file
->private_data
= pCifsFile
;
398 if (file
->private_data
== NULL
) {
403 rc
= cifs_open_inode_helper(inode
, file
, pCifsInode
, pCifsFile
, tcon
,
404 &oplock
, buf
, full_path
, xid
);
406 if (oplock
& CIFS_CREATE_ACTION
) {
407 /* time to set mode which we can not set earlier due to
408 problems creating new read-only files */
409 if (tcon
->unix_ext
) {
410 struct cifs_unix_set_info_args args
= {
411 .mode
= inode
->i_mode
,
414 .ctime
= NO_CHANGE_64
,
415 .atime
= NO_CHANGE_64
,
416 .mtime
= NO_CHANGE_64
,
419 CIFSSMBUnixSetPathInfo(xid
, tcon
, full_path
, &args
,
421 cifs_sb
->mnt_cifs_flags
&
422 CIFS_MOUNT_MAP_SPECIAL_CHR
);
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
444 static int cifs_reopen_file(struct file
*file
, bool can_flush
)
449 struct cifs_sb_info
*cifs_sb
;
450 struct cifsTconInfo
*tcon
;
451 struct cifsFileInfo
*pCifsFile
;
452 struct cifsInodeInfo
*pCifsInode
;
454 char *full_path
= NULL
;
456 int disposition
= FILE_OPEN
;
459 if (file
->private_data
)
460 pCifsFile
= (struct cifsFileInfo
*)file
->private_data
;
465 mutex_lock(&pCifsFile
->fh_mutex
);
466 if (!pCifsFile
->invalidHandle
) {
467 mutex_unlock(&pCifsFile
->fh_mutex
);
473 if (file
->f_path
.dentry
== NULL
) {
474 cERROR(1, ("no valid name if dentry freed"));
477 goto reopen_error_exit
;
480 inode
= file
->f_path
.dentry
->d_inode
;
482 cERROR(1, ("inode not valid"));
485 goto reopen_error_exit
;
488 cifs_sb
= CIFS_SB(inode
->i_sb
);
489 tcon
= cifs_sb
->tcon
;
491 /* can not grab rename sem here because various ops, including
492 those that already have the rename sem can end up causing writepage
493 to get called and if the server was down that means we end up here,
494 and we can never tell if the caller already has the rename_sem */
495 full_path
= build_path_from_dentry(file
->f_path
.dentry
);
496 if (full_path
== NULL
) {
499 mutex_unlock(&pCifsFile
->fh_mutex
);
504 cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
505 inode
, file
->f_flags
, full_path
));
512 if (tcon
->unix_ext
&& (tcon
->ses
->capabilities
& CAP_UNIX
) &&
513 (CIFS_UNIX_POSIX_PATH_OPS_CAP
&
514 le64_to_cpu(tcon
->fsUnixInfo
.Capability
))) {
515 int oflags
= (int) cifs_posix_convert_flags(file
->f_flags
);
516 /* can not refresh inode info since size could be stale */
517 rc
= cifs_posix_open(full_path
, NULL
, file
->f_path
.mnt
,
519 cifs_sb
->mnt_file_mode
/* ignored */,
520 oflags
, &oplock
, &netfid
, xid
);
522 cFYI(1, ("posix reopen succeeded"));
525 /* fallthrough to retry open the old way on errors, especially
526 in the reconnect path it is important to retry hard */
529 desiredAccess
= cifs_convert_flags(file
->f_flags
);
531 /* Can not refresh inode by passing in file_info buf to be returned
532 by SMBOpen and then calling get_inode_info with returned buf
533 since file might have write behind data that needs to be flushed
534 and server version of file size can be stale. If we knew for sure
535 that inode was not dirty locally we could do this */
537 rc
= CIFSSMBOpen(xid
, tcon
, full_path
, disposition
, desiredAccess
,
538 CREATE_NOT_DIR
, &netfid
, &oplock
, NULL
,
539 cifs_sb
->local_nls
, cifs_sb
->mnt_cifs_flags
&
540 CIFS_MOUNT_MAP_SPECIAL_CHR
);
542 mutex_unlock(&pCifsFile
->fh_mutex
);
543 cFYI(1, ("cifs_open returned 0x%x", rc
));
544 cFYI(1, ("oplock: %d", oplock
));
547 pCifsFile
->netfid
= netfid
;
548 pCifsFile
->invalidHandle
= false;
549 mutex_unlock(&pCifsFile
->fh_mutex
);
550 pCifsInode
= CIFS_I(inode
);
553 rc
= filemap_write_and_wait(inode
->i_mapping
);
555 CIFS_I(inode
)->write_behind_rc
= rc
;
556 /* temporarily disable caching while we
557 go to server to get inode info */
558 pCifsInode
->clientCanCacheAll
= false;
559 pCifsInode
->clientCanCacheRead
= false;
561 rc
= cifs_get_inode_info_unix(&inode
,
562 full_path
, inode
->i_sb
, xid
);
564 rc
= cifs_get_inode_info(&inode
,
565 full_path
, NULL
, inode
->i_sb
,
567 } /* else we are writing out data to server already
568 and could deadlock if we tried to flush data, and
569 since we do not know if we have data that would
570 invalidate the current end of file on the server
571 we can not go to the server to get the new inod
573 if ((oplock
& 0xF) == OPLOCK_EXCLUSIVE
) {
574 pCifsInode
->clientCanCacheAll
= true;
575 pCifsInode
->clientCanCacheRead
= true;
576 cFYI(1, ("Exclusive Oplock granted on inode %p",
577 file
->f_path
.dentry
->d_inode
));
578 } else if ((oplock
& 0xF) == OPLOCK_READ
) {
579 pCifsInode
->clientCanCacheRead
= true;
580 pCifsInode
->clientCanCacheAll
= false;
582 pCifsInode
->clientCanCacheRead
= false;
583 pCifsInode
->clientCanCacheAll
= false;
585 cifs_relock_file(pCifsFile
);
593 int cifs_close(struct inode
*inode
, struct file
*file
)
597 struct cifs_sb_info
*cifs_sb
;
598 struct cifsTconInfo
*pTcon
;
599 struct cifsFileInfo
*pSMBFile
=
600 (struct cifsFileInfo
*)file
->private_data
;
604 cifs_sb
= CIFS_SB(inode
->i_sb
);
605 pTcon
= cifs_sb
->tcon
;
607 struct cifsLockInfo
*li
, *tmp
;
608 write_lock(&GlobalSMBSeslock
);
609 pSMBFile
->closePend
= true;
611 /* no sense reconnecting to close a file that is
613 if (!pTcon
->need_reconnect
) {
614 write_unlock(&GlobalSMBSeslock
);
616 while ((atomic_read(&pSMBFile
->count
) != 1)
617 && (timeout
<= 2048)) {
618 /* Give write a better chance to get to
619 server ahead of the close. We do not
620 want to add a wait_q here as it would
621 increase the memory utilization as
622 the struct would be in each open file,
623 but this should give enough time to
626 ("close delay, write pending"));
630 if (!pTcon
->need_reconnect
&&
631 !pSMBFile
->invalidHandle
)
632 rc
= CIFSSMBClose(xid
, pTcon
,
635 write_unlock(&GlobalSMBSeslock
);
637 write_unlock(&GlobalSMBSeslock
);
639 /* Delete any outstanding lock records.
640 We'll lose them when the file is closed anyway. */
641 mutex_lock(&pSMBFile
->lock_mutex
);
642 list_for_each_entry_safe(li
, tmp
, &pSMBFile
->llist
, llist
) {
643 list_del(&li
->llist
);
646 mutex_unlock(&pSMBFile
->lock_mutex
);
648 write_lock(&GlobalSMBSeslock
);
649 list_del(&pSMBFile
->flist
);
650 list_del(&pSMBFile
->tlist
);
651 write_unlock(&GlobalSMBSeslock
);
652 cifsFileInfo_put(file
->private_data
);
653 file
->private_data
= NULL
;
657 read_lock(&GlobalSMBSeslock
);
658 if (list_empty(&(CIFS_I(inode
)->openFileList
))) {
659 cFYI(1, ("closing last open instance for inode %p", inode
));
660 /* if the file is not open we do not know if we can cache info
661 on this inode, much less write behind and read ahead */
662 CIFS_I(inode
)->clientCanCacheRead
= false;
663 CIFS_I(inode
)->clientCanCacheAll
= false;
665 read_unlock(&GlobalSMBSeslock
);
666 if ((rc
== 0) && CIFS_I(inode
)->write_behind_rc
)
667 rc
= CIFS_I(inode
)->write_behind_rc
;
672 int cifs_closedir(struct inode
*inode
, struct file
*file
)
676 struct cifsFileInfo
*pCFileStruct
=
677 (struct cifsFileInfo
*)file
->private_data
;
680 cFYI(1, ("Closedir inode = 0x%p", inode
));
685 struct cifsTconInfo
*pTcon
;
686 struct cifs_sb_info
*cifs_sb
=
687 CIFS_SB(file
->f_path
.dentry
->d_sb
);
689 pTcon
= cifs_sb
->tcon
;
691 cFYI(1, ("Freeing private data in close dir"));
692 write_lock(&GlobalSMBSeslock
);
693 if (!pCFileStruct
->srch_inf
.endOfSearch
&&
694 !pCFileStruct
->invalidHandle
) {
695 pCFileStruct
->invalidHandle
= true;
696 write_unlock(&GlobalSMBSeslock
);
697 rc
= CIFSFindClose(xid
, pTcon
, pCFileStruct
->netfid
);
698 cFYI(1, ("Closing uncompleted readdir with rc %d",
700 /* not much we can do if it fails anyway, ignore rc */
703 write_unlock(&GlobalSMBSeslock
);
704 ptmp
= pCFileStruct
->srch_inf
.ntwrk_buf_start
;
706 cFYI(1, ("closedir free smb buf in srch struct"));
707 pCFileStruct
->srch_inf
.ntwrk_buf_start
= NULL
;
708 if (pCFileStruct
->srch_inf
.smallBuf
)
709 cifs_small_buf_release(ptmp
);
711 cifs_buf_release(ptmp
);
713 kfree(file
->private_data
);
714 file
->private_data
= NULL
;
716 /* BB can we lock the filestruct while this is going on? */
721 static int store_file_lock(struct cifsFileInfo
*fid
, __u64 len
,
722 __u64 offset
, __u8 lockType
)
724 struct cifsLockInfo
*li
=
725 kmalloc(sizeof(struct cifsLockInfo
), GFP_KERNEL
);
731 mutex_lock(&fid
->lock_mutex
);
732 list_add(&li
->llist
, &fid
->llist
);
733 mutex_unlock(&fid
->lock_mutex
);
737 int cifs_lock(struct file
*file
, int cmd
, struct file_lock
*pfLock
)
743 bool wait_flag
= false;
744 struct cifs_sb_info
*cifs_sb
;
745 struct cifsTconInfo
*tcon
;
747 __u8 lockType
= LOCKING_ANDX_LARGE_FILES
;
748 bool posix_locking
= 0;
750 length
= 1 + pfLock
->fl_end
- pfLock
->fl_start
;
754 cFYI(1, ("Lock parm: 0x%x flockflags: "
755 "0x%x flocktype: 0x%x start: %lld end: %lld",
756 cmd
, pfLock
->fl_flags
, pfLock
->fl_type
, pfLock
->fl_start
,
759 if (pfLock
->fl_flags
& FL_POSIX
)
761 if (pfLock
->fl_flags
& FL_FLOCK
)
763 if (pfLock
->fl_flags
& FL_SLEEP
) {
764 cFYI(1, ("Blocking lock"));
767 if (pfLock
->fl_flags
& FL_ACCESS
)
768 cFYI(1, ("Process suspended by mandatory locking - "
769 "not implemented yet"));
770 if (pfLock
->fl_flags
& FL_LEASE
)
771 cFYI(1, ("Lease on file - not implemented yet"));
772 if (pfLock
->fl_flags
&
773 (~(FL_POSIX
| FL_FLOCK
| FL_SLEEP
| FL_ACCESS
| FL_LEASE
)))
774 cFYI(1, ("Unknown lock flags 0x%x", pfLock
->fl_flags
));
776 if (pfLock
->fl_type
== F_WRLCK
) {
777 cFYI(1, ("F_WRLCK "));
779 } else if (pfLock
->fl_type
== F_UNLCK
) {
780 cFYI(1, ("F_UNLCK"));
782 /* Check if unlock includes more than
784 } else if (pfLock
->fl_type
== F_RDLCK
) {
785 cFYI(1, ("F_RDLCK"));
786 lockType
|= LOCKING_ANDX_SHARED_LOCK
;
788 } else if (pfLock
->fl_type
== F_EXLCK
) {
789 cFYI(1, ("F_EXLCK"));
791 } else if (pfLock
->fl_type
== F_SHLCK
) {
792 cFYI(1, ("F_SHLCK"));
793 lockType
|= LOCKING_ANDX_SHARED_LOCK
;
796 cFYI(1, ("Unknown type of lock"));
798 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
799 tcon
= cifs_sb
->tcon
;
801 if (file
->private_data
== NULL
) {
806 netfid
= ((struct cifsFileInfo
*)file
->private_data
)->netfid
;
808 if ((tcon
->ses
->capabilities
& CAP_UNIX
) &&
809 (CIFS_UNIX_FCNTL_CAP
& le64_to_cpu(tcon
->fsUnixInfo
.Capability
)) &&
810 ((cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_NOPOSIXBRL
) == 0))
812 /* BB add code here to normalize offset and length to
813 account for negative length which we can not accept over the
818 if (lockType
& LOCKING_ANDX_SHARED_LOCK
)
819 posix_lock_type
= CIFS_RDLCK
;
821 posix_lock_type
= CIFS_WRLCK
;
822 rc
= CIFSSMBPosixLock(xid
, tcon
, netfid
, 1 /* get */,
824 posix_lock_type
, wait_flag
);
829 /* BB we could chain these into one lock request BB */
830 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
, pfLock
->fl_start
,
831 0, 1, lockType
, 0 /* wait flag */ );
833 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
834 pfLock
->fl_start
, 1 /* numUnlock */ ,
835 0 /* numLock */ , lockType
,
837 pfLock
->fl_type
= F_UNLCK
;
839 cERROR(1, ("Error unlocking previously locked "
840 "range %d during test of lock", rc
));
844 /* if rc == ERR_SHARING_VIOLATION ? */
847 if (lockType
& LOCKING_ANDX_SHARED_LOCK
) {
848 pfLock
->fl_type
= F_WRLCK
;
850 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
851 pfLock
->fl_start
, 0, 1,
852 lockType
| LOCKING_ANDX_SHARED_LOCK
,
855 rc
= CIFSSMBLock(xid
, tcon
, netfid
,
856 length
, pfLock
->fl_start
, 1, 0,
858 LOCKING_ANDX_SHARED_LOCK
,
860 pfLock
->fl_type
= F_RDLCK
;
862 cERROR(1, ("Error unlocking "
863 "previously locked range %d "
864 "during test of lock", rc
));
867 pfLock
->fl_type
= F_WRLCK
;
877 if (!numLock
&& !numUnlock
) {
878 /* if no lock or unlock then nothing
879 to do since we do not know what it is */
886 if (lockType
& LOCKING_ANDX_SHARED_LOCK
)
887 posix_lock_type
= CIFS_RDLCK
;
889 posix_lock_type
= CIFS_WRLCK
;
892 posix_lock_type
= CIFS_UNLCK
;
894 rc
= CIFSSMBPosixLock(xid
, tcon
, netfid
, 0 /* set */,
896 posix_lock_type
, wait_flag
);
898 struct cifsFileInfo
*fid
=
899 (struct cifsFileInfo
*)file
->private_data
;
902 rc
= CIFSSMBLock(xid
, tcon
, netfid
, length
,
904 0, numLock
, lockType
, wait_flag
);
907 /* For Windows locks we must store them. */
908 rc
= store_file_lock(fid
, length
,
909 pfLock
->fl_start
, lockType
);
911 } else if (numUnlock
) {
912 /* For each stored lock that this unlock overlaps
913 completely, unlock it. */
915 struct cifsLockInfo
*li
, *tmp
;
918 mutex_lock(&fid
->lock_mutex
);
919 list_for_each_entry_safe(li
, tmp
, &fid
->llist
, llist
) {
920 if (pfLock
->fl_start
<= li
->offset
&&
921 (pfLock
->fl_start
+ length
) >=
922 (li
->offset
+ li
->length
)) {
923 stored_rc
= CIFSSMBLock(xid
, tcon
,
925 li
->length
, li
->offset
,
926 1, 0, li
->type
, false);
930 list_del(&li
->llist
);
934 mutex_unlock(&fid
->lock_mutex
);
938 if (pfLock
->fl_flags
& FL_POSIX
)
939 posix_lock_file_wait(file
, pfLock
);
945 * Set the timeout on write requests past EOF. For some servers (Windows)
946 * these calls can be very long.
948 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
949 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
950 * The 10M cutoff is totally arbitrary. A better scheme for this would be
951 * welcome if someone wants to suggest one.
953 * We may be able to do a better job with this if there were some way to
954 * declare that a file should be sparse.
957 cifs_write_timeout(struct cifsInodeInfo
*cifsi
, loff_t offset
)
959 if (offset
<= cifsi
->server_eof
)
961 else if (offset
> (cifsi
->server_eof
+ (10 * 1024 * 1024)))
962 return CIFS_VLONG_OP
;
967 /* update the file size (if needed) after a write */
969 cifs_update_eof(struct cifsInodeInfo
*cifsi
, loff_t offset
,
970 unsigned int bytes_written
)
972 loff_t end_of_write
= offset
+ bytes_written
;
974 if (end_of_write
> cifsi
->server_eof
)
975 cifsi
->server_eof
= end_of_write
;
978 ssize_t
cifs_user_write(struct file
*file
, const char __user
*write_data
,
979 size_t write_size
, loff_t
*poffset
)
982 unsigned int bytes_written
= 0;
983 unsigned int total_written
;
984 struct cifs_sb_info
*cifs_sb
;
985 struct cifsTconInfo
*pTcon
;
987 struct cifsFileInfo
*open_file
;
988 struct cifsInodeInfo
*cifsi
= CIFS_I(file
->f_path
.dentry
->d_inode
);
990 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
992 pTcon
= cifs_sb
->tcon
;
995 (" write %d bytes to offset %lld of %s", write_size,
996 *poffset, file->f_path.dentry->d_name.name)); */
998 if (file
->private_data
== NULL
)
1000 open_file
= (struct cifsFileInfo
*) file
->private_data
;
1002 rc
= generic_write_checks(file
, poffset
, &write_size
, 0);
1008 long_op
= cifs_write_timeout(cifsi
, *poffset
);
1009 for (total_written
= 0; write_size
> total_written
;
1010 total_written
+= bytes_written
) {
1012 while (rc
== -EAGAIN
) {
1013 if (file
->private_data
== NULL
) {
1014 /* file has been closed on us */
1016 /* if we have gotten here we have written some data
1017 and blocked, and the file has been freed on us while
1018 we blocked so return what we managed to write */
1019 return total_written
;
1021 if (open_file
->closePend
) {
1024 return total_written
;
1028 if (open_file
->invalidHandle
) {
1029 /* we could deadlock if we called
1030 filemap_fdatawait from here so tell
1031 reopen_file not to flush data to server
1033 rc
= cifs_reopen_file(file
, false);
1038 rc
= CIFSSMBWrite(xid
, pTcon
,
1040 min_t(const int, cifs_sb
->wsize
,
1041 write_size
- total_written
),
1042 *poffset
, &bytes_written
,
1043 NULL
, write_data
+ total_written
, long_op
);
1045 if (rc
|| (bytes_written
== 0)) {
1053 cifs_update_eof(cifsi
, *poffset
, bytes_written
);
1054 *poffset
+= bytes_written
;
1056 long_op
= CIFS_STD_OP
; /* subsequent writes fast -
1057 15 seconds is plenty */
1060 cifs_stats_bytes_written(pTcon
, total_written
);
1062 /* since the write may have blocked check these pointers again */
1063 if ((file
->f_path
.dentry
) && (file
->f_path
.dentry
->d_inode
)) {
1064 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1065 /* Do not update local mtime - server will set its actual value on write
1066 * inode->i_ctime = inode->i_mtime =
1067 * current_fs_time(inode->i_sb);*/
1068 if (total_written
> 0) {
1069 spin_lock(&inode
->i_lock
);
1070 if (*poffset
> file
->f_path
.dentry
->d_inode
->i_size
)
1071 i_size_write(file
->f_path
.dentry
->d_inode
,
1073 spin_unlock(&inode
->i_lock
);
1075 mark_inode_dirty_sync(file
->f_path
.dentry
->d_inode
);
1078 return total_written
;
1081 static ssize_t
cifs_write(struct file
*file
, const char *write_data
,
1082 size_t write_size
, loff_t
*poffset
)
1085 unsigned int bytes_written
= 0;
1086 unsigned int total_written
;
1087 struct cifs_sb_info
*cifs_sb
;
1088 struct cifsTconInfo
*pTcon
;
1090 struct cifsFileInfo
*open_file
;
1091 struct cifsInodeInfo
*cifsi
= CIFS_I(file
->f_path
.dentry
->d_inode
);
1093 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1095 pTcon
= cifs_sb
->tcon
;
1097 cFYI(1, ("write %zd bytes to offset %lld of %s", write_size
,
1098 *poffset
, file
->f_path
.dentry
->d_name
.name
));
1100 if (file
->private_data
== NULL
)
1102 open_file
= (struct cifsFileInfo
*)file
->private_data
;
1106 long_op
= cifs_write_timeout(cifsi
, *poffset
);
1107 for (total_written
= 0; write_size
> total_written
;
1108 total_written
+= bytes_written
) {
1110 while (rc
== -EAGAIN
) {
1111 if (file
->private_data
== NULL
) {
1112 /* file has been closed on us */
1114 /* if we have gotten here we have written some data
1115 and blocked, and the file has been freed on us
1116 while we blocked so return what we managed to
1118 return total_written
;
1120 if (open_file
->closePend
) {
1123 return total_written
;
1127 if (open_file
->invalidHandle
) {
1128 /* we could deadlock if we called
1129 filemap_fdatawait from here so tell
1130 reopen_file not to flush data to
1132 rc
= cifs_reopen_file(file
, false);
1136 if (experimEnabled
|| (pTcon
->ses
->server
&&
1137 ((pTcon
->ses
->server
->secMode
&
1138 (SECMODE_SIGN_REQUIRED
| SECMODE_SIGN_ENABLED
))
1143 len
= min((size_t)cifs_sb
->wsize
,
1144 write_size
- total_written
);
1145 /* iov[0] is reserved for smb header */
1146 iov
[1].iov_base
= (char *)write_data
+
1148 iov
[1].iov_len
= len
;
1149 rc
= CIFSSMBWrite2(xid
, pTcon
,
1150 open_file
->netfid
, len
,
1151 *poffset
, &bytes_written
,
1154 rc
= CIFSSMBWrite(xid
, pTcon
,
1156 min_t(const int, cifs_sb
->wsize
,
1157 write_size
- total_written
),
1158 *poffset
, &bytes_written
,
1159 write_data
+ total_written
,
1162 if (rc
|| (bytes_written
== 0)) {
1170 cifs_update_eof(cifsi
, *poffset
, bytes_written
);
1171 *poffset
+= bytes_written
;
1173 long_op
= CIFS_STD_OP
; /* subsequent writes fast -
1174 15 seconds is plenty */
1177 cifs_stats_bytes_written(pTcon
, total_written
);
1179 /* since the write may have blocked check these pointers again */
1180 if ((file
->f_path
.dentry
) && (file
->f_path
.dentry
->d_inode
)) {
1181 /*BB We could make this contingent on superblock ATIME flag too */
1182 /* file->f_path.dentry->d_inode->i_ctime =
1183 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1184 if (total_written
> 0) {
1185 spin_lock(&file
->f_path
.dentry
->d_inode
->i_lock
);
1186 if (*poffset
> file
->f_path
.dentry
->d_inode
->i_size
)
1187 i_size_write(file
->f_path
.dentry
->d_inode
,
1189 spin_unlock(&file
->f_path
.dentry
->d_inode
->i_lock
);
1191 mark_inode_dirty_sync(file
->f_path
.dentry
->d_inode
);
1194 return total_written
;
#ifdef CONFIG_CIFS_EXPERIMENTAL
/*
 * Find an open handle on the inode that is usable for reading (O_RDWR or
 * O_RDONLY), take a reference on it, and return it; NULL if none found.
 * NOTE(review): restored from truncated source — verify against upstream
 * fs/cifs/file.c before merging.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif
1228 struct cifsFileInfo
*find_writable_file(struct cifsInodeInfo
*cifs_inode
)
1230 struct cifsFileInfo
*open_file
;
1231 bool any_available
= false;
1234 /* Having a null inode here (because mapping->host was set to zero by
1235 the VFS or MM) should not happen but we had reports of on oops (due to
1236 it being zero) during stress testcases so we need to check for it */
1238 if (cifs_inode
== NULL
) {
1239 cERROR(1, ("Null inode passed to cifs_writeable_file"));
1244 read_lock(&GlobalSMBSeslock
);
1246 list_for_each_entry(open_file
, &cifs_inode
->openFileList
, flist
) {
1247 if (open_file
->closePend
||
1248 (!any_available
&& open_file
->pid
!= current
->tgid
))
1251 if (open_file
->pfile
&&
1252 ((open_file
->pfile
->f_flags
& O_RDWR
) ||
1253 (open_file
->pfile
->f_flags
& O_WRONLY
))) {
1254 cifsFileInfo_get(open_file
);
1256 if (!open_file
->invalidHandle
) {
1257 /* found a good writable file */
1258 read_unlock(&GlobalSMBSeslock
);
1262 read_unlock(&GlobalSMBSeslock
);
1263 /* Had to unlock since following call can block */
1264 rc
= cifs_reopen_file(open_file
->pfile
, false);
1266 if (!open_file
->closePend
)
1268 else { /* start over in case this was deleted */
1269 /* since the list could be modified */
1270 read_lock(&GlobalSMBSeslock
);
1271 cifsFileInfo_put(open_file
);
1272 goto refind_writable
;
1276 /* if it fails, try another handle if possible -
1277 (we can not do this if closePending since
1278 loop could be modified - in which case we
1279 have to start at the beginning of the list
1280 again. Note that it would be bad
1281 to hold up writepages here (rather than
1282 in caller) with continuous retries */
1283 cFYI(1, ("wp failed on reopen file"));
1284 read_lock(&GlobalSMBSeslock
);
1285 /* can not use this handle, no write
1286 pending on this one after all */
1287 cifsFileInfo_put(open_file
);
1289 if (open_file
->closePend
) /* list could have changed */
1290 goto refind_writable
;
1291 /* else we simply continue to the next entry. Thus
1292 we do not loop on reopen errors. If we
1293 can not reopen the file, for example if we
1294 reconnected to a server with another client
1295 racing to delete or lock the file we would not
1296 make progress if we restarted before the beginning
1297 of the loop here. */
1300 /* couldn't find useable FH with same pid, try any available */
1301 if (!any_available
) {
1302 any_available
= true;
1303 goto refind_writable
;
1305 read_unlock(&GlobalSMBSeslock
);
1309 static int cifs_partialpagewrite(struct page
*page
, unsigned from
, unsigned to
)
1311 struct address_space
*mapping
= page
->mapping
;
1312 loff_t offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
1315 int bytes_written
= 0;
1316 struct cifs_sb_info
*cifs_sb
;
1317 struct cifsTconInfo
*pTcon
;
1318 struct inode
*inode
;
1319 struct cifsFileInfo
*open_file
;
1321 if (!mapping
|| !mapping
->host
)
1324 inode
= page
->mapping
->host
;
1325 cifs_sb
= CIFS_SB(inode
->i_sb
);
1326 pTcon
= cifs_sb
->tcon
;
1328 offset
+= (loff_t
)from
;
1329 write_data
= kmap(page
);
1332 if ((to
> PAGE_CACHE_SIZE
) || (from
> to
)) {
1337 /* racing with truncate? */
1338 if (offset
> mapping
->host
->i_size
) {
1340 return 0; /* don't care */
1343 /* check to make sure that we are not extending the file */
1344 if (mapping
->host
->i_size
- offset
< (loff_t
)to
)
1345 to
= (unsigned)(mapping
->host
->i_size
- offset
);
1347 open_file
= find_writable_file(CIFS_I(mapping
->host
));
1349 bytes_written
= cifs_write(open_file
->pfile
, write_data
,
1351 cifsFileInfo_put(open_file
);
1352 /* Does mm or vfs already set times? */
1353 inode
->i_atime
= inode
->i_mtime
= current_fs_time(inode
->i_sb
);
1354 if ((bytes_written
> 0) && (offset
))
1356 else if (bytes_written
< 0)
1359 cFYI(1, ("No writeable filehandles for inode"));
1367 static int cifs_writepages(struct address_space
*mapping
,
1368 struct writeback_control
*wbc
)
1370 struct backing_dev_info
*bdi
= mapping
->backing_dev_info
;
1371 unsigned int bytes_to_write
;
1372 unsigned int bytes_written
;
1373 struct cifs_sb_info
*cifs_sb
;
1377 int range_whole
= 0;
1384 struct cifsFileInfo
*open_file
;
1385 struct cifsInodeInfo
*cifsi
= CIFS_I(mapping
->host
);
1387 struct pagevec pvec
;
1392 cifs_sb
= CIFS_SB(mapping
->host
->i_sb
);
1395 * If wsize is smaller that the page cache size, default to writing
1396 * one page at a time via cifs_writepage
1398 if (cifs_sb
->wsize
< PAGE_CACHE_SIZE
)
1399 return generic_writepages(mapping
, wbc
);
1401 if ((cifs_sb
->tcon
->ses
) && (cifs_sb
->tcon
->ses
->server
))
1402 if (cifs_sb
->tcon
->ses
->server
->secMode
&
1403 (SECMODE_SIGN_REQUIRED
| SECMODE_SIGN_ENABLED
))
1404 if (!experimEnabled
)
1405 return generic_writepages(mapping
, wbc
);
1407 iov
= kmalloc(32 * sizeof(struct kvec
), GFP_KERNEL
);
1409 return generic_writepages(mapping
, wbc
);
1413 * BB: Is this meaningful for a non-block-device file system?
1414 * If it is, we should test it again after we do I/O
1416 if (wbc
->nonblocking
&& bdi_write_congested(bdi
)) {
1417 wbc
->encountered_congestion
= 1;
1424 pagevec_init(&pvec
, 0);
1425 if (wbc
->range_cyclic
) {
1426 index
= mapping
->writeback_index
; /* Start from prev offset */
1429 index
= wbc
->range_start
>> PAGE_CACHE_SHIFT
;
1430 end
= wbc
->range_end
>> PAGE_CACHE_SHIFT
;
1431 if (wbc
->range_start
== 0 && wbc
->range_end
== LLONG_MAX
)
1436 while (!done
&& (index
<= end
) &&
1437 (nr_pages
= pagevec_lookup_tag(&pvec
, mapping
, &index
,
1438 PAGECACHE_TAG_DIRTY
,
1439 min(end
- index
, (pgoff_t
)PAGEVEC_SIZE
- 1) + 1))) {
1448 for (i
= 0; i
< nr_pages
; i
++) {
1449 page
= pvec
.pages
[i
];
1451 * At this point we hold neither mapping->tree_lock nor
1452 * lock on the page itself: the page may be truncated or
1453 * invalidated (changing page->mapping to NULL), or even
1454 * swizzled back from swapper_space to tmpfs file
1460 else if (!trylock_page(page
))
1463 if (unlikely(page
->mapping
!= mapping
)) {
1468 if (!wbc
->range_cyclic
&& page
->index
> end
) {
1474 if (next
&& (page
->index
!= next
)) {
1475 /* Not next consecutive page */
1480 if (wbc
->sync_mode
!= WB_SYNC_NONE
)
1481 wait_on_page_writeback(page
);
1483 if (PageWriteback(page
) ||
1484 !clear_page_dirty_for_io(page
)) {
1490 * This actually clears the dirty bit in the radix tree.
1491 * See cifs_writepage() for more commentary.
1493 set_page_writeback(page
);
1495 if (page_offset(page
) >= mapping
->host
->i_size
) {
1498 end_page_writeback(page
);
1503 * BB can we get rid of this? pages are held by pvec
1505 page_cache_get(page
);
1507 len
= min(mapping
->host
->i_size
- page_offset(page
),
1508 (loff_t
)PAGE_CACHE_SIZE
);
1510 /* reserve iov[0] for the smb header */
1512 iov
[n_iov
].iov_base
= kmap(page
);
1513 iov
[n_iov
].iov_len
= len
;
1514 bytes_to_write
+= len
;
1518 offset
= page_offset(page
);
1520 next
= page
->index
+ 1;
1521 if (bytes_to_write
+ PAGE_CACHE_SIZE
> cifs_sb
->wsize
)
1525 /* Search for a writable handle every time we call
1526 * CIFSSMBWrite2. We can't rely on the last handle
1527 * we used to still be valid
1529 open_file
= find_writable_file(CIFS_I(mapping
->host
));
1531 cERROR(1, ("No writable handles for inode"));
1534 long_op
= cifs_write_timeout(cifsi
, offset
);
1535 rc
= CIFSSMBWrite2(xid
, cifs_sb
->tcon
,
1537 bytes_to_write
, offset
,
1538 &bytes_written
, iov
, n_iov
,
1540 cifsFileInfo_put(open_file
);
1541 cifs_update_eof(cifsi
, offset
, bytes_written
);
1543 if (rc
|| bytes_written
< bytes_to_write
) {
1544 cERROR(1, ("Write2 ret %d, wrote %d",
1545 rc
, bytes_written
));
1546 /* BB what if continued retry is
1547 requested via mount flags? */
1549 set_bit(AS_ENOSPC
, &mapping
->flags
);
1551 set_bit(AS_EIO
, &mapping
->flags
);
1553 cifs_stats_bytes_written(cifs_sb
->tcon
,
1557 for (i
= 0; i
< n_iov
; i
++) {
1558 page
= pvec
.pages
[first
+ i
];
1559 /* Should we also set page error on
1560 success rc but too little data written? */
1561 /* BB investigate retry logic on temporary
1562 server crash cases and how recovery works
1563 when page marked as error */
1568 end_page_writeback(page
);
1569 page_cache_release(page
);
1571 if ((wbc
->nr_to_write
-= n_iov
) <= 0)
1575 /* Need to re-find the pages we skipped */
1576 index
= pvec
.pages
[0]->index
+ 1;
1578 pagevec_release(&pvec
);
1580 if (!scanned
&& !done
) {
1582 * We hit the last page and there is more work to be done: wrap
1583 * back to the start of the file
1589 if (wbc
->range_cyclic
|| (range_whole
&& wbc
->nr_to_write
> 0))
1590 mapping
->writeback_index
= index
;
1597 static int cifs_writepage(struct page
*page
, struct writeback_control
*wbc
)
1603 /* BB add check for wbc flags */
1604 page_cache_get(page
);
1605 if (!PageUptodate(page
))
1606 cFYI(1, ("ppw - page not up to date"));
1609 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1611 * A writepage() implementation always needs to do either this,
1612 * or re-dirty the page with "redirty_page_for_writepage()" in
1613 * the case of a failure.
1615 * Just unlocking the page will cause the radix tree tag-bits
1616 * to fail to update with the state of the page correctly.
1618 set_page_writeback(page
);
1619 rc
= cifs_partialpagewrite(page
, 0, PAGE_CACHE_SIZE
);
1620 SetPageUptodate(page
); /* BB add check for error and Clearuptodate? */
1622 end_page_writeback(page
);
1623 page_cache_release(page
);
1628 static int cifs_write_end(struct file
*file
, struct address_space
*mapping
,
1629 loff_t pos
, unsigned len
, unsigned copied
,
1630 struct page
*page
, void *fsdata
)
1633 struct inode
*inode
= mapping
->host
;
1635 cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
1636 page
, pos
, copied
));
1638 if (PageChecked(page
)) {
1640 SetPageUptodate(page
);
1641 ClearPageChecked(page
);
1642 } else if (!PageUptodate(page
) && copied
== PAGE_CACHE_SIZE
)
1643 SetPageUptodate(page
);
1645 if (!PageUptodate(page
)) {
1647 unsigned offset
= pos
& (PAGE_CACHE_SIZE
- 1);
1651 /* this is probably better than directly calling
1652 partialpage_write since in this function the file handle is
1653 known which we might as well leverage */
1654 /* BB check if anything else missing out of ppw
1655 such as updating last write time */
1656 page_data
= kmap(page
);
1657 rc
= cifs_write(file
, page_data
+ offset
, copied
, &pos
);
1658 /* if (rc < 0) should we set writebehind rc? */
1665 set_page_dirty(page
);
1669 spin_lock(&inode
->i_lock
);
1670 if (pos
> inode
->i_size
)
1671 i_size_write(inode
, pos
);
1672 spin_unlock(&inode
->i_lock
);
1676 page_cache_release(page
);
1681 int cifs_fsync(struct file
*file
, struct dentry
*dentry
, int datasync
)
1685 struct cifsTconInfo
*tcon
;
1686 struct cifsFileInfo
*smbfile
=
1687 (struct cifsFileInfo
*)file
->private_data
;
1688 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1692 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1693 dentry
->d_name
.name
, datasync
));
1695 rc
= filemap_write_and_wait(inode
->i_mapping
);
1697 rc
= CIFS_I(inode
)->write_behind_rc
;
1698 CIFS_I(inode
)->write_behind_rc
= 0;
1699 tcon
= CIFS_SB(inode
->i_sb
)->tcon
;
1700 if (!rc
&& tcon
&& smbfile
&&
1701 !(CIFS_SB(inode
->i_sb
)->mnt_cifs_flags
& CIFS_MOUNT_NOSSYNC
))
1702 rc
= CIFSSMBFlush(xid
, tcon
, smbfile
->netfid
);
1709 /* static void cifs_sync_page(struct page *page)
1711 struct address_space *mapping;
1712 struct inode *inode;
1713 unsigned long index = page->index;
1714 unsigned int rpages = 0;
1717 cFYI(1, ("sync page %p",page));
1718 mapping = page->mapping;
1721 inode = mapping->host;
1725 /* fill in rpages then
1726 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1728 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1738 * As file closes, flush all cached write data for this inode checking
1739 * for write behind errors.
1741 int cifs_flush(struct file
*file
, fl_owner_t id
)
1743 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1746 /* Rather than do the steps manually:
1747 lock the inode for writing
1748 loop through pages looking for write behind data (dirty pages)
1749 coalesce into contiguous 16K (or smaller) chunks to write to server
1750 send to server (prefer in parallel)
1751 deal with writebehind errors
1752 unlock inode for writing
1753 filemapfdatawrite appears easier for the time being */
1755 rc
= filemap_fdatawrite(inode
->i_mapping
);
1756 /* reset wb rc if we were able to write out dirty pages */
1758 rc
= CIFS_I(inode
)->write_behind_rc
;
1759 CIFS_I(inode
)->write_behind_rc
= 0;
1762 cFYI(1, ("Flush inode %p file %p rc %d", inode
, file
, rc
));
1767 ssize_t
cifs_user_read(struct file
*file
, char __user
*read_data
,
1768 size_t read_size
, loff_t
*poffset
)
1771 unsigned int bytes_read
= 0;
1772 unsigned int total_read
= 0;
1773 unsigned int current_read_size
;
1774 struct cifs_sb_info
*cifs_sb
;
1775 struct cifsTconInfo
*pTcon
;
1777 struct cifsFileInfo
*open_file
;
1778 char *smb_read_data
;
1779 char __user
*current_offset
;
1780 struct smb_com_read_rsp
*pSMBr
;
1783 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1784 pTcon
= cifs_sb
->tcon
;
1786 if (file
->private_data
== NULL
) {
1791 open_file
= (struct cifsFileInfo
*)file
->private_data
;
1793 if ((file
->f_flags
& O_ACCMODE
) == O_WRONLY
)
1794 cFYI(1, ("attempting read on write only file instance"));
1796 for (total_read
= 0, current_offset
= read_data
;
1797 read_size
> total_read
;
1798 total_read
+= bytes_read
, current_offset
+= bytes_read
) {
1799 current_read_size
= min_t(const int, read_size
- total_read
,
1802 smb_read_data
= NULL
;
1803 while (rc
== -EAGAIN
) {
1804 int buf_type
= CIFS_NO_BUFFER
;
1805 if ((open_file
->invalidHandle
) &&
1806 (!open_file
->closePend
)) {
1807 rc
= cifs_reopen_file(file
, true);
1811 rc
= CIFSSMBRead(xid
, pTcon
,
1813 current_read_size
, *poffset
,
1814 &bytes_read
, &smb_read_data
,
1816 pSMBr
= (struct smb_com_read_rsp
*)smb_read_data
;
1817 if (smb_read_data
) {
1818 if (copy_to_user(current_offset
,
1820 4 /* RFC1001 length field */ +
1821 le16_to_cpu(pSMBr
->DataOffset
),
1825 if (buf_type
== CIFS_SMALL_BUFFER
)
1826 cifs_small_buf_release(smb_read_data
);
1827 else if (buf_type
== CIFS_LARGE_BUFFER
)
1828 cifs_buf_release(smb_read_data
);
1829 smb_read_data
= NULL
;
1832 if (rc
|| (bytes_read
== 0)) {
1840 cifs_stats_bytes_read(pTcon
, bytes_read
);
1841 *poffset
+= bytes_read
;
1849 static ssize_t
cifs_read(struct file
*file
, char *read_data
, size_t read_size
,
1853 unsigned int bytes_read
= 0;
1854 unsigned int total_read
;
1855 unsigned int current_read_size
;
1856 struct cifs_sb_info
*cifs_sb
;
1857 struct cifsTconInfo
*pTcon
;
1859 char *current_offset
;
1860 struct cifsFileInfo
*open_file
;
1861 int buf_type
= CIFS_NO_BUFFER
;
1864 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
1865 pTcon
= cifs_sb
->tcon
;
1867 if (file
->private_data
== NULL
) {
1872 open_file
= (struct cifsFileInfo
*)file
->private_data
;
1874 if ((file
->f_flags
& O_ACCMODE
) == O_WRONLY
)
1875 cFYI(1, ("attempting read on write only file instance"));
1877 for (total_read
= 0, current_offset
= read_data
;
1878 read_size
> total_read
;
1879 total_read
+= bytes_read
, current_offset
+= bytes_read
) {
1880 current_read_size
= min_t(const int, read_size
- total_read
,
1882 /* For windows me and 9x we do not want to request more
1883 than it negotiated since it will refuse the read then */
1885 !(pTcon
->ses
->capabilities
& CAP_LARGE_FILES
)) {
1886 current_read_size
= min_t(const int, current_read_size
,
1887 pTcon
->ses
->server
->maxBuf
- 128);
1890 while (rc
== -EAGAIN
) {
1891 if ((open_file
->invalidHandle
) &&
1892 (!open_file
->closePend
)) {
1893 rc
= cifs_reopen_file(file
, true);
1897 rc
= CIFSSMBRead(xid
, pTcon
,
1899 current_read_size
, *poffset
,
1900 &bytes_read
, ¤t_offset
,
1903 if (rc
|| (bytes_read
== 0)) {
1911 cifs_stats_bytes_read(pTcon
, total_read
);
1912 *poffset
+= bytes_read
;
1919 int cifs_file_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1924 rc
= cifs_revalidate_file(file
);
1926 cFYI(1, ("Validation prior to mmap failed, error=%d", rc
));
1930 rc
= generic_file_mmap(file
, vma
);
1936 static void cifs_copy_cache_pages(struct address_space
*mapping
,
1937 struct list_head
*pages
, int bytes_read
, char *data
,
1938 struct pagevec
*plru_pvec
)
1943 while (bytes_read
> 0) {
1944 if (list_empty(pages
))
1947 page
= list_entry(pages
->prev
, struct page
, lru
);
1948 list_del(&page
->lru
);
1950 if (add_to_page_cache(page
, mapping
, page
->index
,
1952 page_cache_release(page
);
1953 cFYI(1, ("Add page cache failed"));
1954 data
+= PAGE_CACHE_SIZE
;
1955 bytes_read
-= PAGE_CACHE_SIZE
;
1959 target
= kmap_atomic(page
, KM_USER0
);
1961 if (PAGE_CACHE_SIZE
> bytes_read
) {
1962 memcpy(target
, data
, bytes_read
);
1963 /* zero the tail end of this partial page */
1964 memset(target
+ bytes_read
, 0,
1965 PAGE_CACHE_SIZE
- bytes_read
);
1968 memcpy(target
, data
, PAGE_CACHE_SIZE
);
1969 bytes_read
-= PAGE_CACHE_SIZE
;
1971 kunmap_atomic(target
, KM_USER0
);
1973 flush_dcache_page(page
);
1974 SetPageUptodate(page
);
1976 if (!pagevec_add(plru_pvec
, page
))
1977 __pagevec_lru_add_file(plru_pvec
);
1978 data
+= PAGE_CACHE_SIZE
;
1983 static int cifs_readpages(struct file
*file
, struct address_space
*mapping
,
1984 struct list_head
*page_list
, unsigned num_pages
)
1990 struct cifs_sb_info
*cifs_sb
;
1991 struct cifsTconInfo
*pTcon
;
1992 unsigned int bytes_read
= 0;
1993 unsigned int read_size
, i
;
1994 char *smb_read_data
= NULL
;
1995 struct smb_com_read_rsp
*pSMBr
;
1996 struct pagevec lru_pvec
;
1997 struct cifsFileInfo
*open_file
;
1998 int buf_type
= CIFS_NO_BUFFER
;
2001 if (file
->private_data
== NULL
) {
2006 open_file
= (struct cifsFileInfo
*)file
->private_data
;
2007 cifs_sb
= CIFS_SB(file
->f_path
.dentry
->d_sb
);
2008 pTcon
= cifs_sb
->tcon
;
2010 pagevec_init(&lru_pvec
, 0);
2011 cFYI(DBG2
, ("rpages: num pages %d", num_pages
));
2012 for (i
= 0; i
< num_pages
; ) {
2013 unsigned contig_pages
;
2014 struct page
*tmp_page
;
2015 unsigned long expected_index
;
2017 if (list_empty(page_list
))
2020 page
= list_entry(page_list
->prev
, struct page
, lru
);
2021 offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
2023 /* count adjacent pages that we will read into */
2026 list_entry(page_list
->prev
, struct page
, lru
)->index
;
2027 list_for_each_entry_reverse(tmp_page
, page_list
, lru
) {
2028 if (tmp_page
->index
== expected_index
) {
2034 if (contig_pages
+ i
> num_pages
)
2035 contig_pages
= num_pages
- i
;
2037 /* for reads over a certain size could initiate async
2040 read_size
= contig_pages
* PAGE_CACHE_SIZE
;
2041 /* Read size needs to be in multiples of one page */
2042 read_size
= min_t(const unsigned int, read_size
,
2043 cifs_sb
->rsize
& PAGE_CACHE_MASK
);
2044 cFYI(DBG2
, ("rpages: read size 0x%x contiguous pages %d",
2045 read_size
, contig_pages
));
2047 while (rc
== -EAGAIN
) {
2048 if ((open_file
->invalidHandle
) &&
2049 (!open_file
->closePend
)) {
2050 rc
= cifs_reopen_file(file
, true);
2055 rc
= CIFSSMBRead(xid
, pTcon
,
2058 &bytes_read
, &smb_read_data
,
2060 /* BB more RC checks ? */
2061 if (rc
== -EAGAIN
) {
2062 if (smb_read_data
) {
2063 if (buf_type
== CIFS_SMALL_BUFFER
)
2064 cifs_small_buf_release(smb_read_data
);
2065 else if (buf_type
== CIFS_LARGE_BUFFER
)
2066 cifs_buf_release(smb_read_data
);
2067 smb_read_data
= NULL
;
2071 if ((rc
< 0) || (smb_read_data
== NULL
)) {
2072 cFYI(1, ("Read error in readpages: %d", rc
));
2074 } else if (bytes_read
> 0) {
2075 task_io_account_read(bytes_read
);
2076 pSMBr
= (struct smb_com_read_rsp
*)smb_read_data
;
2077 cifs_copy_cache_pages(mapping
, page_list
, bytes_read
,
2078 smb_read_data
+ 4 /* RFC1001 hdr */ +
2079 le16_to_cpu(pSMBr
->DataOffset
), &lru_pvec
);
2081 i
+= bytes_read
>> PAGE_CACHE_SHIFT
;
2082 cifs_stats_bytes_read(pTcon
, bytes_read
);
2083 if ((bytes_read
& PAGE_CACHE_MASK
) != bytes_read
) {
2084 i
++; /* account for partial page */
2086 /* server copy of file can have smaller size
2088 /* BB do we need to verify this common case ?
2089 this case is ok - if we are at server EOF
2090 we will hit it on next read */
2095 cFYI(1, ("No bytes read (%d) at offset %lld . "
2096 "Cleaning remaining pages from readahead list",
2097 bytes_read
, offset
));
2098 /* BB turn off caching and do new lookup on
2099 file size at server? */
2102 if (smb_read_data
) {
2103 if (buf_type
== CIFS_SMALL_BUFFER
)
2104 cifs_small_buf_release(smb_read_data
);
2105 else if (buf_type
== CIFS_LARGE_BUFFER
)
2106 cifs_buf_release(smb_read_data
);
2107 smb_read_data
= NULL
;
2112 pagevec_lru_add_file(&lru_pvec
);
2114 /* need to free smb_read_data buf before exit */
2115 if (smb_read_data
) {
2116 if (buf_type
== CIFS_SMALL_BUFFER
)
2117 cifs_small_buf_release(smb_read_data
);
2118 else if (buf_type
== CIFS_LARGE_BUFFER
)
2119 cifs_buf_release(smb_read_data
);
2120 smb_read_data
= NULL
;
2127 static int cifs_readpage_worker(struct file
*file
, struct page
*page
,
2133 page_cache_get(page
);
2134 read_data
= kmap(page
);
2135 /* for reads over a certain size could initiate async read ahead */
2137 rc
= cifs_read(file
, read_data
, PAGE_CACHE_SIZE
, poffset
);
2142 cFYI(1, ("Bytes read %d", rc
));
2144 file
->f_path
.dentry
->d_inode
->i_atime
=
2145 current_fs_time(file
->f_path
.dentry
->d_inode
->i_sb
);
2147 if (PAGE_CACHE_SIZE
> rc
)
2148 memset(read_data
+ rc
, 0, PAGE_CACHE_SIZE
- rc
);
2150 flush_dcache_page(page
);
2151 SetPageUptodate(page
);
2156 page_cache_release(page
);
2160 static int cifs_readpage(struct file
*file
, struct page
*page
)
2162 loff_t offset
= (loff_t
)page
->index
<< PAGE_CACHE_SHIFT
;
2168 if (file
->private_data
== NULL
) {
2174 cFYI(1, ("readpage %p at offset %d 0x%x\n",
2175 page
, (int)offset
, (int)offset
));
2177 rc
= cifs_readpage_worker(file
, page
, &offset
);
2185 static int is_inode_writable(struct cifsInodeInfo
*cifs_inode
)
2187 struct cifsFileInfo
*open_file
;
2189 read_lock(&GlobalSMBSeslock
);
2190 list_for_each_entry(open_file
, &cifs_inode
->openFileList
, flist
) {
2191 if (open_file
->closePend
)
2193 if (open_file
->pfile
&&
2194 ((open_file
->pfile
->f_flags
& O_RDWR
) ||
2195 (open_file
->pfile
->f_flags
& O_WRONLY
))) {
2196 read_unlock(&GlobalSMBSeslock
);
2200 read_unlock(&GlobalSMBSeslock
);
2204 /* We do not want to update the file size from server for inodes
2205 open for write - to avoid races with writepage extending
2206 the file - in the future we could consider allowing
2207 refreshing the inode only on increases in the file size
2208 but this is tricky to do without racing with writebehind
2209 page caching in the current Linux kernel design */
2210 bool is_size_safe_to_change(struct cifsInodeInfo
*cifsInode
, __u64 end_of_file
)
2215 if (is_inode_writable(cifsInode
)) {
2216 /* This inode is open for write at least once */
2217 struct cifs_sb_info
*cifs_sb
;
2219 cifs_sb
= CIFS_SB(cifsInode
->vfs_inode
.i_sb
);
2220 if (cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_DIRECT_IO
) {
2221 /* since no page cache to corrupt on directio
2222 we can change size safely */
2226 if (i_size_read(&cifsInode
->vfs_inode
) < end_of_file
)
2234 static int cifs_write_begin(struct file
*file
, struct address_space
*mapping
,
2235 loff_t pos
, unsigned len
, unsigned flags
,
2236 struct page
**pagep
, void **fsdata
)
2238 pgoff_t index
= pos
>> PAGE_CACHE_SHIFT
;
2239 loff_t offset
= pos
& (PAGE_CACHE_SIZE
- 1);
2240 loff_t page_start
= pos
& PAGE_MASK
;
2245 cFYI(1, ("write_begin from %lld len %d", (long long)pos
, len
));
2247 page
= grab_cache_page_write_begin(mapping
, index
, flags
);
2253 if (PageUptodate(page
))
2257 * If we write a full page it will be up to date, no need to read from
2258 * the server. If the write is short, we'll end up doing a sync write
2261 if (len
== PAGE_CACHE_SIZE
)
2265 * optimize away the read when we have an oplock, and we're not
2266 * expecting to use any of the data we'd be reading in. That
2267 * is, when the page lies beyond the EOF, or straddles the EOF
2268 * and the write will cover all of the existing data.
2270 if (CIFS_I(mapping
->host
)->clientCanCacheRead
) {
2271 i_size
= i_size_read(mapping
->host
);
2272 if (page_start
>= i_size
||
2273 (offset
== 0 && (pos
+ len
) >= i_size
)) {
2274 zero_user_segments(page
, 0, offset
,
2278 * PageChecked means that the parts of the page
2279 * to which we're not writing are considered up
2280 * to date. Once the data is copied to the
2281 * page, it can be set uptodate.
2283 SetPageChecked(page
);
2288 if ((file
->f_flags
& O_ACCMODE
) != O_WRONLY
) {
2290 * might as well read a page, it is fast enough. If we get
2291 * an error, we don't need to return it. cifs_write_end will
2292 * do a sync write instead since PG_uptodate isn't set.
2294 cifs_readpage_worker(file
, page
, &page_start
);
2296 /* we could try using another file handle if there is one -
2297 but how would we lock it to prevent close of that handle
2298 racing with this read? In any case
2299 this will be written out by write_end so is fine */
2307 cifs_oplock_break(struct slow_work
*work
)
2309 struct cifsFileInfo
*cfile
= container_of(work
, struct cifsFileInfo
,
2311 struct inode
*inode
= cfile
->pInode
;
2312 struct cifsInodeInfo
*cinode
= CIFS_I(inode
);
2313 struct cifs_sb_info
*cifs_sb
= CIFS_SB(cfile
->mnt
->mnt_sb
);
2316 if (inode
&& S_ISREG(inode
->i_mode
)) {
2317 #ifdef CONFIG_CIFS_EXPERIMENTAL
2318 if (cinode
->clientCanCacheAll
== 0)
2319 break_lease(inode
, O_RDONLY
);
2320 else if (cinode
->clientCanCacheRead
== 0)
2321 break_lease(inode
, O_WRONLY
);
2323 rc
= filemap_fdatawrite(inode
->i_mapping
);
2324 if (cinode
->clientCanCacheRead
== 0) {
2325 waitrc
= filemap_fdatawait(inode
->i_mapping
);
2326 invalidate_remote_inode(inode
);
2331 cinode
->write_behind_rc
= rc
;
2332 cFYI(1, ("Oplock flush inode %p rc %d", inode
, rc
));
2336 * releasing stale oplock after recent reconnect of smb session using
2337 * a now incorrect file handle is not a data integrity issue but do
2338 * not bother sending an oplock release if session to server still is
2339 * disconnected since oplock already released by the server
2341 if (!cfile
->closePend
&& !cfile
->oplock_break_cancelled
) {
2342 rc
= CIFSSMBLock(0, cifs_sb
->tcon
, cfile
->netfid
, 0, 0, 0, 0,
2343 LOCKING_ANDX_OPLOCK_RELEASE
, false);
2344 cFYI(1, ("Oplock release rc = %d", rc
));
2349 cifs_oplock_break_get(struct slow_work
*work
)
2351 struct cifsFileInfo
*cfile
= container_of(work
, struct cifsFileInfo
,
2354 cifsFileInfo_get(cfile
);
2359 cifs_oplock_break_put(struct slow_work
*work
)
2361 struct cifsFileInfo
*cfile
= container_of(work
, struct cifsFileInfo
,
2364 cifsFileInfo_put(cfile
);
2367 const struct slow_work_ops cifs_oplock_break_ops
= {
2368 .get_ref
= cifs_oplock_break_get
,
2369 .put_ref
= cifs_oplock_break_put
,
2370 .execute
= cifs_oplock_break
,
2373 const struct address_space_operations cifs_addr_ops
= {
2374 .readpage
= cifs_readpage
,
2375 .readpages
= cifs_readpages
,
2376 .writepage
= cifs_writepage
,
2377 .writepages
= cifs_writepages
,
2378 .write_begin
= cifs_write_begin
,
2379 .write_end
= cifs_write_end
,
2380 .set_page_dirty
= __set_page_dirty_nobuffers
,
2381 /* .sync_page = cifs_sync_page, */
2386 * cifs_readpages requires the server to support a buffer large enough to
2387 * contain the header plus one complete page of data. Otherwise, we need
2388 * to leave cifs_readpages out of the address space operations.
2390 const struct address_space_operations cifs_addr_ops_smallbuf
= {
2391 .readpage
= cifs_readpage
,
2392 .writepage
= cifs_writepage
,
2393 .writepages
= cifs_writepages
,
2394 .write_begin
= cifs_write_begin
,
2395 .write_end
= cifs_write_end
,
2396 .set_page_dirty
= __set_page_dirty_nobuffers
,
2397 /* .sync_page = cifs_sync_page, */