[PATCH] Modularize generic HDLC
[linux-2.6.git] / fs / cifs / file.c
blobddb012a68023fe0818d50670f2fb81a990dbab56
1 /*
2 * fs/cifs/file.c
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/mpage.h>
29 #include <linux/pagemap.h>
30 #include <linux/pagevec.h>
31 #include <linux/smp_lock.h>
32 #include <linux/writeback.h>
33 #include <linux/delay.h>
34 #include <asm/div64.h>
35 #include "cifsfs.h"
36 #include "cifspdu.h"
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "cifs_unicode.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
43 static inline struct cifsFileInfo *cifs_init_private(
44 struct cifsFileInfo *private_data, struct inode *inode,
45 struct file *file, __u16 netfid)
47 memset(private_data, 0, sizeof(struct cifsFileInfo));
48 private_data->netfid = netfid;
49 private_data->pid = current->tgid;
50 init_MUTEX(&private_data->fh_sem);
51 init_MUTEX(&private_data->lock_sem);
52 INIT_LIST_HEAD(&private_data->llist);
53 private_data->pfile = file; /* needed for writepage */
54 private_data->pInode = inode;
55 private_data->invalidHandle = FALSE;
56 private_data->closePend = FALSE;
57 /* we have to track num writers to the inode, since writepages
58 does not tell us which handle the write is for so there can
59 be a close (overlapping with write) of the filehandle that
60 cifs_writepages chose to use */
61 atomic_set(&private_data->wrtPending,0);
63 return private_data;
66 static inline int cifs_convert_flags(unsigned int flags)
68 if ((flags & O_ACCMODE) == O_RDONLY)
69 return GENERIC_READ;
70 else if ((flags & O_ACCMODE) == O_WRONLY)
71 return GENERIC_WRITE;
72 else if ((flags & O_ACCMODE) == O_RDWR) {
73 /* GENERIC_ALL is too much permission to request
74 can cause unnecessary access denied on create */
75 /* return GENERIC_ALL; */
76 return (GENERIC_READ | GENERIC_WRITE);
79 return 0x20197;
82 static inline int cifs_get_disposition(unsigned int flags)
84 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
85 return FILE_CREATE;
86 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
87 return FILE_OVERWRITE_IF;
88 else if ((flags & O_CREAT) == O_CREAT)
89 return FILE_OPEN_IF;
90 else if ((flags & O_TRUNC) == O_TRUNC)
91 return FILE_OVERWRITE;
92 else
93 return FILE_OPEN;
/* all arguments to this function must be checked for validity in caller */
/* Finish cifs_open bookkeeping for an already-opened netfid: link the new
   file entry onto the inode's open-file list, revalidate cached pages if
   the server copy changed, refresh inode metadata, and record any oplock
   granted.  Caller holds GlobalSMBSeslock for write on entry; this
   function DROPS it (write_unlock below) before doing network calls.
   Returns the rc of the inode-info refresh. */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct timespec temp;
	int rc;

	/* want handles we can use to read with first
	   in the list so we do not have to walk the
	   list to search for one in prepare_write */
	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		list_add_tail(&pCifsFile->flist,
			      &pCifsInode->openFileList);
	} else {
		list_add(&pCifsFile->flist,
			 &pCifsInode->openFileList);
	}
	write_unlock(&GlobalSMBSeslock);
	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
	    (file->f_dentry->d_inode->i_size ==
	     (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, ("inode unchanged on server"));
	} else {
		if (file->f_dentry->d_inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
		}
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_dentry->d_inode);
	}

client_can_cache:
	/* refresh inode metadata via the Unix extensions when available,
	   otherwise from the FILE_ALL_INFO buf returned by the open */
	if (pTcon->ses->capabilities & CAP_UNIX)
		rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
			full_path, inode->i_sb, xid);
	else
		rc = cifs_get_inode_info(&file->f_dentry->d_inode,
			full_path, buf, inode->i_sb, xid);

	/* low nibble of the oplock word carries the grant level */
	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = TRUE;
		pCifsInode->clientCanCacheRead = TRUE;
		cFYI(1, ("Exclusive Oplock granted on inode %p",
			 file->f_dentry->d_inode));
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = TRUE;

	return rc;
}
/* VFS open for regular files.  For O_CREAT opens, first look for the
   handle cifs_create already opened (its pfile is still NULL) and reuse
   it; otherwise issue an SMB open (NT style, falling back to legacy
   OpenX), register the new cifsFileInfo on the tcon and inode lists,
   and honor any oplock granted.  Returns 0 or a negative errno. */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct list_head *tmp;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	if (file->f_flags & O_CREAT) {
		/* search inode for this file and fill in file->private_data */
		pCifsInode = CIFS_I(file->f_dentry->d_inode);
		read_lock(&GlobalSMBSeslock);
		list_for_each(tmp, &pCifsInode->openFileList) {
			pCifsFile = list_entry(tmp, struct cifsFileInfo,
					       flist);
			if ((pCifsFile->pfile == NULL) &&
			    (pCifsFile->pid == current->tgid)) {
				/* mode set in cifs_create */

				/* needed for writepage */
				pCifsFile->pfile = file;

				file->private_data = pCifsFile;
				break;
			}
		}
		read_unlock(&GlobalSMBSeslock);
		if (file->private_data != NULL) {
			/* reused the handle cifs_create opened; done */
			rc = 0;
			FreeXid(xid);
			return rc;
		} else {
			if (file->f_flags & O_EXCL)
				cERROR(1, ("could not find file instance for "
					   "new file %p", file));
		}
	}

	full_path = build_path_from_dentry(file->f_dentry);
	if (full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}

	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = FALSE;

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, ("cifs_open returned 0x%x", rc));
		goto out;
	}

	file->private_data =
		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (file->private_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
	write_lock(&GlobalSMBSeslock);
	list_add(&pCifsFile->tlist, &pTcon->openFileList);

	pCifsInode = CIFS_I(file->f_dentry->d_inode);
	if (pCifsInode) {
		/* helper drops GlobalSMBSeslock */
		rc = cifs_open_inode_helper(inode, file, pCifsInode,
					    pCifsFile, pTcon,
					    &oplock, buf, full_path, xid);
	} else {
		write_unlock(&GlobalSMBSeslock);
	}

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
					    inode->i_mode,
					    (__u64)-1, (__u64)-1, 0 /* dev */,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		} else {
			/* BB implement via Windows security descriptors eg
			   CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
			   -1, -1, local_nls);
			   in the meantime could set r/o dos attribute when
			   perms are eg: mode & 0222 == 0 */
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
/* Re-open a file whose remote handle was invalidated (e.g. by session
   reconnect).  Serializes on the per-handle fh_sem; if can_flush is set,
   flushes dirty pages and refreshes inode info before re-enabling
   caching per the newly granted oplock.  Returns 0 or a negative errno. */
static int cifs_reopen_file(struct inode *inode, struct file *file,
	int can_flush)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (inode == NULL)
		return -EBADF;
	if (file->private_data) {
		pCifsFile = (struct cifsFileInfo *)file->private_data;
	} else
		return -EBADF;

	xid = GetXid();
	down(&pCifsFile->fh_sem);
	if (pCifsFile->invalidHandle == FALSE) {
		/* another task already reopened it while we waited */
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return 0;
	}

	if (file->f_dentry == NULL) {
		up(&pCifsFile->fh_sem);
		cFYI(1, ("failed file reopen, no valid name if dentry freed"));
		FreeXid(xid);
		return -EBADF;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_dentry);
	if (full_path == NULL) {
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return -ENOMEM;
	}

	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = FALSE;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == 0) {
		up(&pCifsFile->fh_sem);
		kfree(full_path);
		FreeXid(xid);
		return -ENOMEM;
	} */
	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		up(&pCifsFile->fh_sem);
		cFYI(1, ("cifs_open returned 0x%x", rc));
		cFYI(1, ("oplock: %d", oplock));
	} else {
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = FALSE;
		up(&pCifsFile->fh_sem);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				filemap_write_and_wait(inode->i_mapping);
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = FALSE;
				pCifsInode->clientCanCacheRead = FALSE;
				if (pTcon->ses->capabilities & CAP_UNIX)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = TRUE;
				pCifsInode->clientCanCacheRead = TRUE;
				cFYI(1, ("Exclusive Oplock granted on inode %p",
					 file->f_dentry->d_inode));
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = TRUE;
				pCifsInode->clientCanCacheAll = FALSE;
			} else {
				pCifsInode->clientCanCacheRead = FALSE;
				pCifsInode->clientCanCacheAll = FALSE;
			}
			cifs_relock_file(pCifsFile);
		}
	}

	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/* VFS release for regular files.  Marks the handle closePend, waits
   (with exponential backoff, bounded) for in-flight writes to drain,
   sends the SMB close, frees stored byte-range lock records, and
   unlinks/frees the cifsFileInfo.  Propagates any deferred
   write-behind error.  Returns 0 or a negative errno. */
int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile =
		(struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;

		pSMBFile->closePend = TRUE;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				int timeout = 2;
				while((atomic_read(&pSMBFile->wrtPending) != 0)
					&& (timeout < 1000) ) {
					/* Give write a better chance to get to
					   server ahead of the close.  We do not
					   want to add a wait_q here as it would
					   increase the memory utilization as
					   the struct would be in each open file,
					   but this should give enough time to
					   clear the socket */
					cERROR(1,("close with pending writes"));
					msleep(timeout);
					timeout *= 4;
				}
				rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
			}
		}

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		down(&pSMBFile->lock_sem);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		up(&pSMBFile->lock_sem);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		kfree(pSMBFile->search_resume_name);
		kfree(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, ("closing last open instance for inode %p", inode));
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = FALSE;
		CIFS_I(inode)->clientCanCacheAll = FALSE;
	}
	/* surface any error recorded by earlier write-behind */
	if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}
/* VFS release for directories opened for readdir.  Closes an
   uncompleted server-side search, releases the cached network search
   buffer and resume name, and frees the private data.  Errors from
   CIFSFindClose are deliberately ignored. */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct =
		(struct cifsFileInfo *)file->private_data;
	char *ptmp;

	cFYI(1, ("Closedir inode = 0x%p", inode));

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, ("Freeing private data in close dir"));
		if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
		    (pCFileStruct->invalidHandle == FALSE)) {
			pCFileStruct->invalidHandle = TRUE;
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, ("Closing uncompleted readdir with rc %d",
				 rc));
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		}
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, ("closedir free smb buf in srch struct"));
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* buffer came from the small- or big-buf pool;
			   release to the matching pool */
			if(pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		ptmp = pCFileStruct->search_resume_name;
		if (ptmp) {
			cFYI(1, ("closedir free resume name"));
			pCFileStruct->search_resume_name = NULL;
			kfree(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
588 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
589 __u64 offset, __u8 lockType)
591 struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
592 if (li == NULL)
593 return -ENOMEM;
594 li->offset = offset;
595 li->length = len;
596 li->type = lockType;
597 down(&fid->lock_sem);
598 list_add(&li->llist, &fid->llist);
599 up(&fid->lock_sem);
600 return 0;
/* VFS ->lock handler.  Translates a POSIX/flock request into either a
   CIFS POSIX lock (when the server advertises CIFS_UNIX_FCNTL_CAP) or a
   Windows LOCKING_ANDX lock.  Windows-style locks that succeed are
   recorded via store_file_lock so cifs_close can discard them; unlocks
   drop any fully-covered stored records.  Returns 0 or negative errno. */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	int wait_flag = FALSE;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	int posix_locking;

	/* fl_end is inclusive, hence the +1 */
	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, ("Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		 pfLock->fl_end));

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, ("Posix"));
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, ("Flock"));
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, ("Blocking lock"));
		wait_flag = TRUE;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, ("Process suspended by mandatory locking - "
			 "not implemented yet"));
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, ("Lease on file - not implemented yet"));
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, ("F_WRLCK "));
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, ("F_UNLCK"));
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, ("F_RDLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, ("F_EXLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, ("F_SHLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, ("Unknown type of lock"));

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));

	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if(posix_locking) {
			int posix_lock_type;
			if(lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
					      length, pfLock,
					      posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* Windows path: probe by taking then releasing the lock.
		   BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */ );
		if (rc == 0) {
			rc = CIFSSMBLock(xid, pTcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */ );
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, ("Error unlocking previously locked "
					   "range %d during test of lock", rc));
			rc = 0;
		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;	/* do not change lock type to unlock
				   since range in use */
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if(lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if(numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
					 0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						     pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			down(&fid->lock_sem);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
				    length >= li->length) {
					stored_rc = CIFSSMBLock(xid, pTcon, netfid,
								li->length, li->offset,
								1, 0, li->type, FALSE);
					if (stored_rc)
						rc = stored_rc;

					list_del(&li->llist);
					kfree(li);
				}
			}
			up(&fid->lock_sem);
		}
	}

	/* record the lock locally as well for POSIX semantics */
	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
/* Write write_size bytes from a USERSPACE buffer at *poffset, retrying
   each chunk on -EAGAIN and transparently re-opening an invalidated
   handle.  Advances *poffset and updates inode times/size.  Returns the
   number of bytes written (possibly short if the handle goes away
   mid-write) or a negative errno if nothing was written. */
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	if (file->f_dentry == NULL)
		return -EBADF;

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	if (cifs_sb == NULL)
		return -EBADF;

	pTcon = cifs_sb->tcon;

	/* cFYI(1,
	   (" write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_dentry->d_name.name)); */

	if (file->private_data == NULL)
		return -EBADF;
	else
		open_file = (struct cifsFileInfo *) file->private_data;

	xid = GetXid();
	if (file->f_dentry->d_inode == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	if (*poffset > file->f_dentry->d_inode->i_size)
		long_op = 2; /* writes past end of file can take a long time */
	else
		long_op = 1;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
				/* if we have gotten here we have written some data
				   and blocked, and the file has been freed on us while
				   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				if ((file->f_dentry == NULL) ||
				    (file->f_dentry->d_inode == NULL)) {
					FreeXid(xid);
					return total_written;
				}
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, FALSE);
				if (rc != 0)
					break;
			}
			/* user buffer passed in the ubuf slot so the
			   transport copies from userspace */
			rc = CIFSSMBWrite(xid, pTcon,
					  open_file->netfid,
					  min_t(const int, cifs_sb->wsize,
						write_size - total_written),
					  *poffset, &bytes_written,
					  NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = FALSE; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if (file->f_dentry) {
		if (file->f_dentry->d_inode) {
			struct inode *inode = file->f_dentry->d_inode;
			inode->i_ctime = inode->i_mtime =
				current_fs_time(inode->i_sb);
			if (total_written > 0) {
				if (*poffset > file->f_dentry->d_inode->i_size)
					i_size_write(file->f_dentry->d_inode,
						     *poffset);
			}
			mark_inode_dirty_sync(file->f_dentry->d_inode);
		}
	}
	FreeXid(xid);
	return total_written;
}
/* Write write_size bytes from a KERNEL buffer at *poffset (used by the
   writepage paths).  Same retry/re-open loop as cifs_user_write; when
   signing is off (or experimental mode is on) it uses the zero-copy
   CIFSSMBWrite2 kvec path.  Returns bytes written or a negative errno
   if nothing was written. */
static ssize_t cifs_write(struct file *file, const char *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	if (file->f_dentry == NULL)
		return -EBADF;

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	if (cifs_sb == NULL)
		return -EBADF;

	pTcon = cifs_sb->tcon;

	cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
	   *poffset, file->f_dentry->d_name.name));

	if (file->private_data == NULL)
		return -EBADF;
	else
		open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();
	if (file->f_dentry->d_inode == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	if (*poffset > file->f_dentry->d_inode->i_size)
		long_op = 2; /* writes past end of file can take a long time */
	else
		long_op = 1;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
				/* if we have gotten here we have written some data
				   and blocked, and the file has been freed on us
				   while we blocked so return what we managed to
				   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				if ((file->f_dentry == NULL) ||
				    (file->f_dentry->d_inode == NULL)) {
					FreeXid(xid);
					return total_written;
				}
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, FALSE);
				if (rc != 0)
					break;
			}
			if(experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						   open_file->netfid, len,
						   *poffset, &bytes_written,
						   iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = FALSE; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if (file->f_dentry) {
		if (file->f_dentry->d_inode) {
			file->f_dentry->d_inode->i_ctime =
				file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
			if (total_written > 0) {
				if (*poffset > file->f_dentry->d_inode->i_size)
					i_size_write(file->f_dentry->d_inode,
						     *poffset);
			}
			mark_inode_dirty_sync(file->f_dentry->d_inode);
		}
	}
	FreeXid(xid);
	return total_written;
}
/* Find an open handle on cifs_inode suitable for writing (O_RDWR or
   O_WRONLY, not pending close), bumping its wrtPending count.  The
   caller must atomic_dec wrtPending when done with the handle.  An
   invalidated handle is re-opened here (without flushing, to avoid
   deadlock from the writepages path).  Returns NULL if no writable
   handle exists. */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if(cifs_inode == NULL) {
		cERROR(1,("Null inode passed to cifs_writeable_file"));
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			atomic_inc(&open_file->wrtPending);
			/* drop the list lock before any network activity */
			read_unlock(&GlobalSMBSeslock);
			if((open_file->invalidHandle) &&
			   (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
						      open_file->pfile, FALSE);
				/* if it fails, try another handle - might be */
				/* dangerous to hold up writepages with retry */
				if(rc) {
					cFYI(1,("failed on reopen file in wp"));
					read_lock(&GlobalSMBSeslock);
					/* can not use this handle, no write
					   pending on this one after all */
					atomic_dec
					    (&open_file->wrtPending);
					continue;
				}
			}
			return open_file;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
/* Write the [from, to) range of a page cache page back to the server
   via a writable handle on the owning inode.  Used by the writepage
   path.  Returns 0 on success (or when racing with truncate), a
   negative errno otherwise. */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to-from, &offset);
		/* release the reference find_writable_file took */
		atomic_dec(&open_file->wrtPending);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset)) {
			rc = 0;
		} else if (bytes_written < 0) {
			/* NOTE(review): rc is still the initial -EFAULT here,
			   so this condition is always true -- presumably meant
			   to filter bytes_written != -EBADF; verify upstream */
			if (rc != -EBADF)
				rc = bytes_written;
		}
	} else {
		cFYI(1, ("No writeable filehandles for inode"));
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
/*
 * cifs_writepages - address_space writepages: write back dirty pages,
 * batching runs of consecutive dirty pages into a single CIFSSMBWrite2
 * call of up to wsize bytes (iov[0] is reserved for the SMB header, so
 * at most 31 data pages per call).  Falls back to generic_writepages()
 * (one page at a time via cifs_writepage) when wsize is smaller than a
 * page, or when packet signing is in use and experimental support is
 * not enabled.
 */
1134 static int cifs_writepages(struct address_space *mapping,
1135 struct writeback_control *wbc)
1137 struct backing_dev_info *bdi = mapping->backing_dev_info;
1138 unsigned int bytes_to_write;
1139 unsigned int bytes_written;
1140 struct cifs_sb_info *cifs_sb;
1141 int done = 0;
1142 pgoff_t end;
1143 pgoff_t index;
1144 int range_whole = 0;
/* one SMB header slot + up to 31 page slots per write call */
1145 struct kvec iov[32];
1146 int len;
1147 int n_iov = 0;
1148 pgoff_t next;
1149 int nr_pages;
1150 __u64 offset = 0;
1151 struct cifsFileInfo *open_file;
1152 struct page *page;
1153 struct pagevec pvec;
1154 int rc = 0;
1155 int scanned = 0;
1156 int xid;
1158 cifs_sb = CIFS_SB(mapping->host->i_sb);
1161 * If wsize is smaller that the page cache size, default to writing
1162 * one page at a time via cifs_writepage
1164 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1165 return generic_writepages(mapping, wbc);
/* signed traffic is only handled by the multi-page path when the
   experimental flag is set; otherwise take the generic path */
1167 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1168 if(cifs_sb->tcon->ses->server->secMode &
1169 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1170 if(!experimEnabled)
1171 return generic_writepages(mapping, wbc);
1174 * BB: Is this meaningful for a non-block-device file system?
1175 * If it is, we should test it again after we do I/O
1177 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1178 wbc->encountered_congestion = 1;
1179 return 0;
1182 xid = GetXid();
1184 pagevec_init(&pvec, 0);
/* pick the starting/ending page index from the writeback request */
1185 if (wbc->range_cyclic) {
1186 index = mapping->writeback_index; /* Start from prev offset */
1187 end = -1;
1188 } else {
1189 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1190 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1191 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1192 range_whole = 1;
1193 scanned = 1;
1195 retry:
/* outer loop: grab up to a pagevec's worth of dirty pages at a time */
1196 while (!done && (index <= end) &&
1197 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1198 PAGECACHE_TAG_DIRTY,
1199 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1200 int first;
1201 unsigned int i;
1203 first = -1;
1204 next = 0;
1205 n_iov = 0;
1206 bytes_to_write = 0;
/* inner loop: collect a run of consecutive, lockable dirty pages */
1208 for (i = 0; i < nr_pages; i++) {
1209 page = pvec.pages[i];
1211 * At this point we hold neither mapping->tree_lock nor
1212 * lock on the page itself: the page may be truncated or
1213 * invalidated (changing page->mapping to NULL), or even
1214 * swizzled back from swapper_space to tmpfs file
1215 * mapping
/* block for the first page of a batch, but only trylock
   later ones so we never deadlock mid-batch */
1218 if (first < 0)
1219 lock_page(page);
1220 else if (TestSetPageLocked(page))
1221 break;
1223 if (unlikely(page->mapping != mapping)) {
1224 unlock_page(page);
1225 break;
1228 if (!wbc->range_cyclic && page->index > end) {
1229 done = 1;
1230 unlock_page(page);
1231 break;
1234 if (next && (page->index != next)) {
1235 /* Not next consecutive page */
1236 unlock_page(page);
1237 break;
1240 if (wbc->sync_mode != WB_SYNC_NONE)
1241 wait_on_page_writeback(page);
1243 if (PageWriteback(page) ||
1244 !test_clear_page_dirty(page)) {
1245 unlock_page(page);
1246 break;
/* wholly past EOF - nothing to write for this mapping */
1249 if (page_offset(page) >= mapping->host->i_size) {
1250 done = 1;
1251 unlock_page(page);
1252 break;
1256 * BB can we get rid of this? pages are held by pvec
1258 page_cache_get(page);
/* last page of the file may be partial */
1260 len = min(mapping->host->i_size - page_offset(page),
1261 (loff_t)PAGE_CACHE_SIZE);
1263 /* reserve iov[0] for the smb header */
1264 n_iov++;
1265 iov[n_iov].iov_base = kmap(page);
1266 iov[n_iov].iov_len = len;
1267 bytes_to_write += len;
1269 if (first < 0) {
1270 first = i;
1271 offset = page_offset(page);
1273 next = page->index + 1;
/* stop the batch before it could exceed the negotiated wsize */
1274 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1275 break;
/* issue one SMB write for the whole collected batch */
1277 if (n_iov) {
1278 /* Search for a writable handle every time we call
1279 * CIFSSMBWrite2. We can't rely on the last handle
1280 * we used to still be valid
1282 open_file = find_writable_file(CIFS_I(mapping->host));
1283 if (!open_file) {
1284 cERROR(1, ("No writable handles for inode"));
1285 rc = -EBADF;
1286 } else {
1287 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1288 open_file->netfid,
1289 bytes_to_write, offset,
/* NOTE(review): the trailing long_op argument of this call
   appears to be missing in this copy of the file - confirm
   against the original source */
1290 &bytes_written, iov, n_iov,
1292 atomic_dec(&open_file->wrtPending);
1293 if (rc || bytes_written < bytes_to_write) {
1294 cERROR(1,("Write2 ret %d, written = %d",
1295 rc, bytes_written));
1296 /* BB what if continued retry is
1297 requested via mount flags? */
1298 set_bit(AS_EIO, &mapping->flags);
1299 } else {
1300 cifs_stats_bytes_written(cifs_sb->tcon,
1301 bytes_written);
/* undo the per-page kmap/lock/get taken while batching */
1304 for (i = 0; i < n_iov; i++) {
1305 page = pvec.pages[first + i];
1306 /* Should we also set page error on
1307 success rc but too little data written? */
1308 /* BB investigate retry logic on temporary
1309 server crash cases and how recovery works
1310 when page marked as error */
1311 if(rc)
1312 SetPageError(page);
1313 kunmap(page);
1314 unlock_page(page);
1315 page_cache_release(page);
1317 if ((wbc->nr_to_write -= n_iov) <= 0)
1318 done = 1;
1319 index = next;
1321 pagevec_release(&pvec);
1323 if (!scanned && !done) {
1325 * We hit the last page and there is more work to be done: wrap
1326 * back to the start of the file
1328 scanned = 1;
1329 index = 0;
1330 goto retry;
1332 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1333 mapping->writeback_index = index;
1335 FreeXid(xid);
1337 return rc;
/*
 * cifs_writepage - address_space writepage: write a single (locked)
 * dirty page to the server via cifs_partialpagewrite(), then unlock
 * and release it.  An extra page reference is held across the write.
 */
1340 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1342 int rc = -EFAULT;
1343 int xid;
1345 xid = GetXid();
1346 /* BB add check for wbc flags */
1347 page_cache_get(page);
1348 if (!PageUptodate(page)) {
1349 cFYI(1, ("ppw - page not up to date"));
/* whole page is pushed, regardless of the uptodate state above */
1352 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1353 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1354 unlock_page(page);
1355 page_cache_release(page);
1356 FreeXid(xid);
1357 return rc;
/*
 * cifs_commit_write - address_space commit_write: finish the write of
 * bytes [offset, to) of a page that cifs_prepare_write() set up.
 *
 * If the write extends the file, i_size is bumped locally (the in-kernel
 * SetEOF round trip is kept commented out below).  A non-uptodate page
 * is written synchronously through this file's own handle via
 * cifs_write(); an uptodate page is only marked dirty and left for
 * writepage(s) to flush later.  Returns 0 on success or a negative rc
 * from the synchronous write path.
 */
1360 static int cifs_commit_write(struct file *file, struct page *page,
1361 unsigned offset, unsigned to)
1363 int xid;
1364 int rc = 0;
1365 struct inode *inode = page->mapping->host;
/* file position just past the last byte being committed */
1366 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1367 char *page_data;
1369 xid = GetXid();
1370 cFYI(1, ("commit write for page %p up to position %lld for %d",
1371 page, position, to));
1372 if (position > inode->i_size) {
1373 i_size_write(inode, position);
1374 /* if (file->private_data == NULL) {
1375 rc = -EBADF;
1376 } else {
1377 open_file = (struct cifsFileInfo *)file->private_data;
1378 cifs_sb = CIFS_SB(inode->i_sb);
1379 rc = -EAGAIN;
1380 while (rc == -EAGAIN) {
1381 if ((open_file->invalidHandle) &&
1382 (!open_file->closePend)) {
1383 rc = cifs_reopen_file(
1384 file->f_dentry->d_inode, file);
1385 if (rc != 0)
1386 break;
1388 if (!open_file->closePend) {
1389 rc = CIFSSMBSetFileSize(xid,
1390 cifs_sb->tcon, position,
1391 open_file->netfid,
1392 open_file->pid, FALSE);
1393 } else {
1394 rc = -EBADF;
1395 break;
1398 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1399 } */
1401 if (!PageUptodate(page)) {
/* rewind position to the first byte actually being written */
1402 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1403 /* can not rely on (or let) writepage write this data */
1404 if (to < offset) {
1405 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1406 offset, to));
1407 FreeXid(xid);
1408 return rc;
1410 /* this is probably better than directly calling
1411 partialpage_write since in this function the file handle is
1412 known which we might as well leverage */
1413 /* BB check if anything else missing out of ppw
1414 such as updating last write time */
1415 page_data = kmap(page);
1416 rc = cifs_write(file, page_data + offset, to-offset,
1417 &position);
/* positive rc means bytes were written - report success */
1418 if (rc > 0)
1419 rc = 0;
1420 /* else if (rc < 0) should we set writebehind rc? */
1421 kunmap(page);
1422 } else {
1423 set_page_dirty(page);
1426 FreeXid(xid);
1427 return rc;
/*
 * cifs_fsync - flush all dirty pages for the file's inode to the
 * server; on success, clear any previously recorded write-behind
 * error.  The datasync flag is only logged, not acted on.
 */
1430 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1432 int xid;
1433 int rc = 0;
1434 struct inode *inode = file->f_dentry->d_inode;
1436 xid = GetXid();
1438 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1439 dentry->d_name.name, datasync));
1441 rc = filemap_fdatawrite(inode->i_mapping);
1442 if (rc == 0)
1443 CIFS_I(inode)->write_behind_rc = 0;
1444 FreeXid(xid);
1445 return rc;
1448 /* static void cifs_sync_page(struct page *page)
1450 struct address_space *mapping;
1451 struct inode *inode;
1452 unsigned long index = page->index;
1453 unsigned int rpages = 0;
1454 int rc = 0;
1456 cFYI(1, ("sync page %p",page));
1457 mapping = page->mapping;
1458 if (!mapping)
1459 return 0;
1460 inode = mapping->host;
1461 if (!inode)
1462 return; */
1464 /* fill in rpages then
1465 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1467 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1469 #if 0
1470 if (rc < 0)
1471 return rc;
1472 return 0;
1473 #endif
1474 } */
1477 * As file closes, flush all cached write data for this inode checking
1478 * for write behind errors.
/*
 * cifs_flush - called on close(): push all dirty pages for the inode
 * to the server and, on success, reset the stored write-behind error.
 * Returns the filemap_fdatawrite() result.
 */
1480 int cifs_flush(struct file *file, fl_owner_t id)
1482 struct inode * inode = file->f_dentry->d_inode;
1483 int rc = 0;
1485 /* Rather than do the steps manually:
1486 lock the inode for writing
1487 loop through pages looking for write behind data (dirty pages)
1488 coalesce into contiguous 16K (or smaller) chunks to write to server
1489 send to server (prefer in parallel)
1490 deal with writebehind errors
1491 unlock inode for writing
1492 filemapfdatawrite appears easier for the time being */
1494 rc = filemap_fdatawrite(inode->i_mapping);
1495 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1496 CIFS_I(inode)->write_behind_rc = 0;
1498 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1500 return rc;
/*
 * cifs_user_read - read up to read_size bytes at *poffset directly into
 * a userspace buffer, looping in chunks of at most rsize.
 *
 * Each chunk is retried while the server returns -EAGAIN, reopening an
 * invalidated (but not closing) handle first.  The payload is copied to
 * userspace from the raw SMB response, skipping the 4-byte RFC1001
 * length field plus the response's DataOffset.  Returns the total bytes
 * read, or a negative errno if the very first chunk fails.
 */
1503 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1504 size_t read_size, loff_t *poffset)
1506 int rc = -EACCES;
1507 unsigned int bytes_read = 0;
1508 unsigned int total_read = 0;
1509 unsigned int current_read_size;
1510 struct cifs_sb_info *cifs_sb;
1511 struct cifsTconInfo *pTcon;
1512 int xid;
1513 struct cifsFileInfo *open_file;
1514 char *smb_read_data;
1515 char __user *current_offset;
1516 struct smb_com_read_rsp *pSMBr;
1518 xid = GetXid();
1519 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1520 pTcon = cifs_sb->tcon;
1522 if (file->private_data == NULL) {
1523 FreeXid(xid);
1524 return -EBADF;
1526 open_file = (struct cifsFileInfo *)file->private_data;
/* reads on write-only opens are only logged, not rejected */
1528 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1529 cFYI(1, ("attempting read on write only file instance"));
1531 for (total_read = 0, current_offset = read_data;
1532 read_size > total_read;
1533 total_read += bytes_read, current_offset += bytes_read) {
1534 current_read_size = min_t(const int, read_size - total_read,
1535 cifs_sb->rsize);
1536 rc = -EAGAIN;
1537 smb_read_data = NULL;
1538 while (rc == -EAGAIN) {
1539 int buf_type = CIFS_NO_BUFFER;
1540 if ((open_file->invalidHandle) &&
1541 (!open_file->closePend)) {
1542 rc = cifs_reopen_file(file->f_dentry->d_inode,
1543 file, TRUE);
1544 if (rc != 0)
1545 break;
1547 rc = CIFSSMBRead(xid, pTcon,
1548 open_file->netfid,
1549 current_read_size, *poffset,
1550 &bytes_read, &smb_read_data,
1551 &buf_type);
1552 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1553 if (smb_read_data) {
1554 if (copy_to_user(current_offset,
1555 smb_read_data +
1556 4 /* RFC1001 length field */ +
1557 le16_to_cpu(pSMBr->DataOffset),
1558 bytes_read)) {
1559 rc = -EFAULT;
/* release the response buffer with the matching free routine */
1562 if(buf_type == CIFS_SMALL_BUFFER)
1563 cifs_small_buf_release(smb_read_data);
1564 else if(buf_type == CIFS_LARGE_BUFFER)
1565 cifs_buf_release(smb_read_data);
1566 smb_read_data = NULL;
/* a failed or empty chunk ends the loop; return the short total
   if any earlier chunk succeeded, else the error itself */
1569 if (rc || (bytes_read == 0)) {
1570 if (total_read) {
1571 break;
1572 } else {
1573 FreeXid(xid);
1574 return rc;
1576 } else {
1577 cifs_stats_bytes_read(pTcon, bytes_read);
1578 *poffset += bytes_read;
1581 FreeXid(xid);
1582 return total_read;
1586 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1587 loff_t *poffset)
1589 int rc = -EACCES;
1590 unsigned int bytes_read = 0;
1591 unsigned int total_read;
1592 unsigned int current_read_size;
1593 struct cifs_sb_info *cifs_sb;
1594 struct cifsTconInfo *pTcon;
1595 int xid;
1596 char *current_offset;
1597 struct cifsFileInfo *open_file;
1598 int buf_type = CIFS_NO_BUFFER;
1600 xid = GetXid();
1601 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1602 pTcon = cifs_sb->tcon;
1604 if (file->private_data == NULL) {
1605 FreeXid(xid);
1606 return -EBADF;
1608 open_file = (struct cifsFileInfo *)file->private_data;
1610 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1611 cFYI(1, ("attempting read on write only file instance"));
1613 for (total_read = 0, current_offset = read_data;
1614 read_size > total_read;
1615 total_read += bytes_read, current_offset += bytes_read) {
1616 current_read_size = min_t(const int, read_size - total_read,
1617 cifs_sb->rsize);
1618 /* For windows me and 9x we do not want to request more
1619 than it negotiated since it will refuse the read then */
1620 if((pTcon->ses) &&
1621 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1622 current_read_size = min_t(const int, current_read_size,
1623 pTcon->ses->server->maxBuf - 128);
1625 rc = -EAGAIN;
1626 while (rc == -EAGAIN) {
1627 if ((open_file->invalidHandle) &&
1628 (!open_file->closePend)) {
1629 rc = cifs_reopen_file(file->f_dentry->d_inode,
1630 file, TRUE);
1631 if (rc != 0)
1632 break;
1634 rc = CIFSSMBRead(xid, pTcon,
1635 open_file->netfid,
1636 current_read_size, *poffset,
1637 &bytes_read, &current_offset,
1638 &buf_type);
1640 if (rc || (bytes_read == 0)) {
1641 if (total_read) {
1642 break;
1643 } else {
1644 FreeXid(xid);
1645 return rc;
1647 } else {
1648 cifs_stats_bytes_read(pTcon, total_read);
1649 *poffset += bytes_read;
1652 FreeXid(xid);
1653 return total_read;
/*
 * cifs_file_mmap - revalidate the dentry against the server before
 * delegating to generic_file_mmap(); the mmap fails if revalidation
 * fails.
 */
1656 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1658 struct dentry *dentry = file->f_dentry;
1659 int rc, xid;
1661 xid = GetXid();
1662 rc = cifs_revalidate(dentry);
1663 if (rc) {
1664 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1665 FreeXid(xid);
1666 return rc;
1668 rc = generic_file_mmap(file, vma);
1669 FreeXid(xid);
1670 return rc;
/*
 * cifs_copy_cache_pages - distribute bytes_read bytes of SMB response
 * data into page cache pages taken from the tail of the read-ahead
 * list.  A partial final page is zero-filled past the data; pages that
 * fail add_to_page_cache are dropped and their slice of the buffer is
 * skipped.  Successfully filled pages are marked uptodate, unlocked,
 * and queued on the caller's LRU pagevec.
 */
1674 static void cifs_copy_cache_pages(struct address_space *mapping,
1675 struct list_head *pages, int bytes_read, char *data,
1676 struct pagevec *plru_pvec)
1678 struct page *page;
1679 char *target;
1681 while (bytes_read > 0) {
1682 if (list_empty(pages))
1683 break;
1685 page = list_entry(pages->prev, struct page, lru);
1686 list_del(&page->lru);
1688 if (add_to_page_cache(page, mapping, page->index,
1689 GFP_KERNEL)) {
1690 page_cache_release(page);
1691 cFYI(1, ("Add page cache failed"));
/* skip this page's worth of data and keep going */
1692 data += PAGE_CACHE_SIZE;
1693 bytes_read -= PAGE_CACHE_SIZE;
1694 continue;
1697 target = kmap_atomic(page,KM_USER0);
1699 if (PAGE_CACHE_SIZE > bytes_read) {
1700 memcpy(target, data, bytes_read);
1701 /* zero the tail end of this partial page */
1702 memset(target + bytes_read, 0,
1703 PAGE_CACHE_SIZE - bytes_read);
1704 bytes_read = 0;
1705 } else {
1706 memcpy(target, data, PAGE_CACHE_SIZE);
1707 bytes_read -= PAGE_CACHE_SIZE;
1709 kunmap_atomic(target, KM_USER0);
1711 flush_dcache_page(page);
1712 SetPageUptodate(page);
1713 unlock_page(page);
/* drain the pagevec to the LRU when it fills */
1714 if (!pagevec_add(plru_pvec, page))
1715 __pagevec_lru_add(plru_pvec);
1716 data += PAGE_CACHE_SIZE;
1718 return;
/*
 * cifs_readpages - address_space readpages: populate the read-ahead
 * page list.  Counts the run of consecutively-indexed pages at the
 * tail of the list, issues one CIFSSMBRead for up to that many pages
 * (bounded by rsize rounded down to a whole page multiple), then
 * copies the response into the page cache via cifs_copy_cache_pages().
 * On read error or EOF, the remaining list pages are released un-read.
 */
1721 static int cifs_readpages(struct file *file, struct address_space *mapping,
1722 struct list_head *page_list, unsigned num_pages)
1724 int rc = -EACCES;
1725 int xid;
1726 loff_t offset;
1727 struct page *page;
1728 struct cifs_sb_info *cifs_sb;
1729 struct cifsTconInfo *pTcon;
1730 int bytes_read = 0;
1731 unsigned int read_size,i;
1732 char *smb_read_data = NULL;
1733 struct smb_com_read_rsp *pSMBr;
1734 struct pagevec lru_pvec;
1735 struct cifsFileInfo *open_file;
1736 int buf_type = CIFS_NO_BUFFER;
1738 xid = GetXid();
1739 if (file->private_data == NULL) {
1740 FreeXid(xid);
1741 return -EBADF;
1743 open_file = (struct cifsFileInfo *)file->private_data;
1744 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1745 pTcon = cifs_sb->tcon;
1747 pagevec_init(&lru_pvec, 0);
/* each pass reads one contiguous run of pages with one SMB read */
1749 for (i = 0; i < num_pages; ) {
1750 unsigned contig_pages;
1751 struct page *tmp_page;
1752 unsigned long expected_index;
1754 if (list_empty(page_list))
1755 break;
1757 page = list_entry(page_list->prev, struct page, lru);
1758 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1760 /* count adjacent pages that we will read into */
1761 contig_pages = 0;
1762 expected_index =
1763 list_entry(page_list->prev, struct page, lru)->index;
1764 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1765 if (tmp_page->index == expected_index) {
1766 contig_pages++;
1767 expected_index++;
1768 } else
1769 break;
1771 if (contig_pages + i > num_pages)
1772 contig_pages = num_pages - i;
1774 /* for reads over a certain size could initiate async
1775 read ahead */
1777 read_size = contig_pages * PAGE_CACHE_SIZE;
1778 /* Read size needs to be in multiples of one page */
1779 read_size = min_t(const unsigned int, read_size,
1780 cifs_sb->rsize & PAGE_CACHE_MASK);
1782 rc = -EAGAIN;
1783 while (rc == -EAGAIN) {
1784 if ((open_file->invalidHandle) &&
1785 (!open_file->closePend)) {
1786 rc = cifs_reopen_file(file->f_dentry->d_inode,
1787 file, TRUE);
1788 if (rc != 0)
1789 break;
1792 rc = CIFSSMBRead(xid, pTcon,
1793 open_file->netfid,
1794 read_size, offset,
1795 &bytes_read, &smb_read_data,
1796 &buf_type);
1797 /* BB more RC checks ? */
/* drop a stale response buffer before retrying */
1798 if (rc== -EAGAIN) {
1799 if (smb_read_data) {
1800 if(buf_type == CIFS_SMALL_BUFFER)
1801 cifs_small_buf_release(smb_read_data);
1802 else if(buf_type == CIFS_LARGE_BUFFER)
1803 cifs_buf_release(smb_read_data);
1804 smb_read_data = NULL;
/* read failed: release all still-queued pages and stop */
1808 if ((rc < 0) || (smb_read_data == NULL)) {
1809 cFYI(1, ("Read error in readpages: %d", rc));
1810 /* clean up remaing pages off list */
1811 while (!list_empty(page_list) && (i < num_pages)) {
1812 page = list_entry(page_list->prev, struct page,
1813 lru);
1814 list_del(&page->lru);
1815 page_cache_release(page);
1817 break;
1818 } else if (bytes_read > 0) {
1819 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
/* payload starts past the RFC1001 length field + DataOffset */
1820 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1821 smb_read_data + 4 /* RFC1001 hdr */ +
1822 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1824 i += bytes_read >> PAGE_CACHE_SHIFT;
1825 cifs_stats_bytes_read(pTcon, bytes_read);
1826 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1827 i++; /* account for partial page */
1829 /* server copy of file can have smaller size
1830 than client */
1831 /* BB do we need to verify this common case ?
1832 this case is ok - if we are at server EOF
1833 we will hit it on next read */
1835 /* while (!list_empty(page_list) && (i < num_pages)) {
1836 page = list_entry(page_list->prev,
1837 struct page, list);
1838 list_del(&page->list);
1839 page_cache_release(page);
1841 break; */
/* zero bytes read (likely EOF): drop the remaining pages */
1843 } else {
1844 cFYI(1, ("No bytes read (%d) at offset %lld . "
1845 "Cleaning remaining pages from readahead list",
1846 bytes_read, offset));
1847 /* BB turn off caching and do new lookup on
1848 file size at server? */
1849 while (!list_empty(page_list) && (i < num_pages)) {
1850 page = list_entry(page_list->prev, struct page,
1851 lru);
1852 list_del(&page->lru);
1854 /* BB removeme - replace with zero of page? */
1855 page_cache_release(page);
1857 break;
/* free the response buffer before the next run */
1859 if (smb_read_data) {
1860 if(buf_type == CIFS_SMALL_BUFFER)
1861 cifs_small_buf_release(smb_read_data);
1862 else if(buf_type == CIFS_LARGE_BUFFER)
1863 cifs_buf_release(smb_read_data);
1864 smb_read_data = NULL;
1866 bytes_read = 0;
/* push any pages still queued on the local pagevec to the LRU */
1869 pagevec_lru_add(&lru_pvec);
1871 /* need to free smb_read_data buf before exit */
1872 if (smb_read_data) {
1873 if(buf_type == CIFS_SMALL_BUFFER)
1874 cifs_small_buf_release(smb_read_data);
1875 else if(buf_type == CIFS_LARGE_BUFFER)
1876 cifs_buf_release(smb_read_data);
1877 smb_read_data = NULL;
1880 FreeXid(xid);
1881 return rc;
/*
 * cifs_readpage_worker - read one page of data at *poffset into the
 * given (already locked) page via cifs_read().  Zero-fills the tail on
 * a short read, updates atime, and marks the page uptodate on success.
 * Returns 0 on success or the negative rc from cifs_read(); the caller
 * remains responsible for unlocking the page.
 */
1884 static int cifs_readpage_worker(struct file *file, struct page *page,
1885 loff_t *poffset)
1887 char *read_data;
1888 int rc;
1890 page_cache_get(page);
1891 read_data = kmap(page);
1892 /* for reads over a certain size could initiate async read ahead */
1894 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1896 if (rc < 0)
1897 goto io_error;
1898 else
/* rc is the number of bytes actually read */
1899 cFYI(1, ("Bytes read %d",rc));
1901 file->f_dentry->d_inode->i_atime =
1902 current_fs_time(file->f_dentry->d_inode->i_sb);
/* short read: zero the rest of the page */
1904 if (PAGE_CACHE_SIZE > rc)
1905 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1907 flush_dcache_page(page);
1908 SetPageUptodate(page);
1909 rc = 0;
1911 io_error:
1912 kunmap(page);
1913 page_cache_release(page);
1914 return rc;
/*
 * cifs_readpage - address_space readpage: read one page through
 * cifs_readpage_worker() using this file's handle, then unlock the
 * page.  Fails with -EBADF when the file has no private CIFS state.
 */
1917 static int cifs_readpage(struct file *file, struct page *page)
1919 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1920 int rc = -EACCES;
1921 int xid;
1923 xid = GetXid();
1925 if (file->private_data == NULL) {
1926 FreeXid(xid);
1927 return -EBADF;
1930 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1931 page, (int)offset, (int)offset));
1933 rc = cifs_readpage_worker(file, page, &offset);
1935 unlock_page(page);
1937 FreeXid(xid);
1938 return rc;
1941 /* We do not want to update the file size from server for inodes
1942 open for write - to avoid races with writepage extending
1943 the file - in the future we could consider allowing
1944 refreshing the inode only on increases in the file size
1945 but this is tricky to do without racing with writebehind
1946 page caching in the current Linux kernel design */
/* Returns 1 when the cached size may be replaced by the server's:
   i.e. when no writable handle is open, or the mount is direct I/O
   (no page cache to corrupt).  Returns 0 otherwise. */
1947 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1949 struct cifsFileInfo *open_file = NULL;
1951 if (cifsInode)
1952 open_file = find_writable_file(cifsInode);
1954 if(open_file) {
1955 struct cifs_sb_info *cifs_sb;
1957 /* there is not actually a write pending so let
1958 this handle go free and allow it to
1959 be closable if needed */
1960 atomic_dec(&open_file->wrtPending);
1962 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1963 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1964 /* since no page cache to corrupt on directio
1965 we can change size safely */
1966 return 1;
1969 return 0;
1970 } else
1971 return 1;
/*
 * cifs_prepare_write - address_space prepare_write: ready the page for
 * an overwrite of bytes [from, to).  A full-page write is simply marked
 * uptodate (no server read needed); otherwise the page is pre-read from
 * the server when the handle permits reads.  Always returns 0 - read
 * errors here are deliberately not propagated (see trailing BB note).
 */
1974 static int cifs_prepare_write(struct file *file, struct page *page,
1975 unsigned from, unsigned to)
1977 int rc = 0;
1978 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1979 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1980 if (!PageUptodate(page)) {
1981 /* if (to - from != PAGE_CACHE_SIZE) {
1982 void *kaddr = kmap_atomic(page, KM_USER0);
1983 memset(kaddr, 0, from);
1984 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1985 flush_dcache_page(page);
1986 kunmap_atomic(kaddr, KM_USER0);
1987 } */
1988 /* If we are writing a full page it will be up to date,
1989 no need to read from the server */
1990 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1991 SetPageUptodate(page);
1993 /* might as well read a page, it is fast enough */
1994 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1995 rc = cifs_readpage_worker(file, page, &offset);
1996 } else {
1997 /* should we try using another file handle if there is one -
1998 how would we lock it to prevent close of that handle
1999 racing with this read?
2000 In any case this will be written out by commit_write */
2004 /* BB should we pass any errors back?
2005 e.g. if we do not have read access to the file */
2006 return 0;
/* address_space operations used when the server buffer is large enough
   for cifs_readpages (SMB header plus a full page of data); a smallbuf
   variant without .readpages is defined below for the fallback case */
2009 const struct address_space_operations cifs_addr_ops = {
2010 .readpage = cifs_readpage,
2011 .readpages = cifs_readpages,
2012 .writepage = cifs_writepage,
2013 .writepages = cifs_writepages,
2014 .prepare_write = cifs_prepare_write,
2015 .commit_write = cifs_commit_write,
2016 .set_page_dirty = __set_page_dirty_nobuffers,
2017 /* .sync_page = cifs_sync_page, */
2018 /* .direct_IO = */
2022 * cifs_readpages requires the server to support a buffer large enough to
2023 * contain the header plus one complete page of data. Otherwise, we need
2024 * to leave cifs_readpages out of the address space operations.
2026 const struct address_space_operations cifs_addr_ops_smallbuf = {
2027 .readpage = cifs_readpage,
2028 .writepage = cifs_writepage,
2029 .writepages = cifs_writepages,
2030 .prepare_write = cifs_prepare_write,
2031 .commit_write = cifs_commit_write,
2032 .set_page_dirty = __set_page_dirty_nobuffers,
2033 /* .sync_page = cifs_sync_page, */
2034 /* .direct_IO = */