1 /*
2 * fs/cifs/file.c
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
23 #include <linux/fs.h>
24 #include <linux/stat.h>
25 #include <linux/fcntl.h>
26 #include <linux/pagemap.h>
27 #include <linux/pagevec.h>
28 #include <linux/smp_lock.h>
29 #include <asm/div64.h>
30 #include "cifsfs.h"
31 #include "cifspdu.h"
32 #include "cifsglob.h"
33 #include "cifsproto.h"
34 #include "cifs_unicode.h"
35 #include "cifs_debug.h"
36 #include "cifs_fs_sb.h"
38 static inline struct cifsFileInfo *cifs_init_private(
39 struct cifsFileInfo *private_data, struct inode *inode,
40 struct file *file, __u16 netfid)
42 memset(private_data, 0, sizeof(struct cifsFileInfo));
43 private_data->netfid = netfid;
44 private_data->pid = current->tgid;
45 init_MUTEX(&private_data->fh_sem);
46 private_data->pfile = file; /* needed for writepage */
47 private_data->pInode = inode;
48 private_data->invalidHandle = FALSE;
49 private_data->closePend = FALSE;
51 return private_data;
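/* Map the O_ACCMODE bits of the POSIX open flags to the desired access
   mask requested in the SMB open call */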
54 static inline int cifs_convert_flags(unsigned int flags)
56 if ((flags & O_ACCMODE) == O_RDONLY)
57 return GENERIC_READ;
58 else if ((flags & O_ACCMODE) == O_WRONLY)
59 return GENERIC_WRITE;
60 else if ((flags & O_ACCMODE) == O_RDWR) {
61 /* GENERIC_ALL is too much permission to request
62 can cause unnecessary access denied on create */
63 /* return GENERIC_ALL; */
64 return (GENERIC_READ | GENERIC_WRITE);
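/* any other access mode falls through to a fixed, hard-coded access mask
   rather than requesting GENERIC_ALL */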
67 return 0x20197;
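/* Map the O_CREAT / O_EXCL / O_TRUNC open flags to the SMB create
   disposition (see the open flag mapping table in cifs_open below) */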
70 static inline int cifs_get_disposition(unsigned int flags)
72 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
73 return FILE_CREATE;
74 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
75 return FILE_OVERWRITE_IF;
76 else if ((flags & O_CREAT) == O_CREAT)
77 return FILE_OPEN_IF;
78 else
79 return FILE_OPEN;
82 /* all arguments to this function must be checked for validity in caller */
83 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
84 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
85 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
86 char *full_path, int xid)
88 struct timespec temp;
89 int rc;
91 /* want handles we can use to read with first
92 in the list so we do not have to walk the
93 list to search for one in prepare_write */
94 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
95 list_add_tail(&pCifsFile->flist,
96 &pCifsInode->openFileList);
97 } else {
98 list_add(&pCifsFile->flist,
99 &pCifsInode->openFileList);
101 write_unlock(&GlobalSMBSeslock);
102 write_unlock(&file->f_owner.lock);
103 if (pCifsInode->clientCanCacheRead) {
104 /* we have the inode open somewhere else
105 no need to discard cache data */
106 goto client_can_cache;
109 /* BB need same check in cifs_create too? */
110 /* if not oplocked, invalidate inode pages if mtime or file
111 size changed */
112 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
113 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
114 (file->f_dentry->d_inode->i_size ==
115 (loff_t)le64_to_cpu(buf->EndOfFile))) {
116 cFYI(1, ("inode unchanged on server"));
117 } else {
118 if (file->f_dentry->d_inode->i_mapping) {
119 /* BB no need to lock inode until after invalidate
120 since namei code should already have it locked? */
121 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
122 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
124 cFYI(1, ("invalidating remote inode since open detected it "
125 "changed"));
126 invalidate_remote_inode(file->f_dentry->d_inode);
129 client_can_cache:
130 if (pTcon->ses->capabilities & CAP_UNIX)
131 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
132 full_path, inode->i_sb, xid);
133 else
134 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
135 full_path, buf, inode->i_sb, xid);
137 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
138 pCifsInode->clientCanCacheAll = TRUE;
139 pCifsInode->clientCanCacheRead = TRUE;
140 cFYI(1, ("Exclusive Oplock granted on inode %p",
141 file->f_dentry->d_inode));
142 } else if ((*oplock & 0xF) == OPLOCK_READ)
143 pCifsInode->clientCanCacheRead = TRUE;
145 return rc;
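/* VFS open entry point: obtain a server file handle (netfid) for this
   inode, request an oplock, and stash the handle in file->private_data */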
148 int cifs_open(struct inode *inode, struct file *file)
150 int rc = -EACCES;
151 int xid, oplock;
152 struct cifs_sb_info *cifs_sb;
153 struct cifsTconInfo *pTcon;
154 struct cifsFileInfo *pCifsFile;
155 struct cifsInodeInfo *pCifsInode;
156 struct list_head *tmp;
157 char *full_path = NULL;
158 int desiredAccess;
159 int disposition;
160 __u16 netfid;
161 FILE_ALL_INFO *buf = NULL;
163 xid = GetXid();
165 cifs_sb = CIFS_SB(inode->i_sb);
166 pTcon = cifs_sb->tcon;
168 if (file->f_flags & O_CREAT) {
169 /* search inode for this file and fill in file->private_data */
170 pCifsInode = CIFS_I(file->f_dentry->d_inode);
171 read_lock(&GlobalSMBSeslock);
172 list_for_each(tmp, &pCifsInode->openFileList) {
173 pCifsFile = list_entry(tmp, struct cifsFileInfo,
174 flist);
175 if ((pCifsFile->pfile == NULL) &&
176 (pCifsFile->pid == current->tgid)) {
177 /* mode set in cifs_create */
179 /* needed for writepage */
180 pCifsFile->pfile = file;
182 file->private_data = pCifsFile;
183 break;
186 read_unlock(&GlobalSMBSeslock);
187 if (file->private_data != NULL) {
188 rc = 0;
189 FreeXid(xid);
190 return rc;
191 } else {
192 if (file->f_flags & O_EXCL)
193 cERROR(1, ("could not find file instance for "
194 "new file %p ", file));
198 down(&inode->i_sb->s_vfs_rename_sem);
199 full_path = build_path_from_dentry(file->f_dentry);
200 up(&inode->i_sb->s_vfs_rename_sem);
201 if (full_path == NULL) {
202 FreeXid(xid);
203 return -ENOMEM;
206 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
207 inode, file->f_flags, full_path));
208 desiredAccess = cifs_convert_flags(file->f_flags);
210 /*********************************************************************
211 * open flag mapping table:
213 * POSIX Flag CIFS Disposition
214 * ---------- ----------------
215 * O_CREAT FILE_OPEN_IF
216 * O_CREAT | O_EXCL FILE_CREATE
217 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
218 * O_TRUNC FILE_OVERWRITE
219 * none of the above FILE_OPEN
221 * Note that there is not a direct match for the disposition
222 * FILE_SUPERSEDE (ie create whether or not the file exists);
223 * O_CREAT | O_TRUNC is similar but truncates the existing
224 * file rather than creating a new file as FILE_SUPERSEDE does
225 * (which uses the attributes / metadata passed in on open call)
227 *? O_SYNC is a reasonable match to CIFS writethrough flag
228 *? and the read write flags match reasonably. O_LARGEFILE
229 *? is irrelevant because largefile support is always used
230 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
231 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
232 *********************************************************************/
234 disposition = cifs_get_disposition(file->f_flags);
236 if (oplockEnabled)
237 oplock = REQ_OPLOCK;
238 else
239 oplock = FALSE;
241 /* BB pass O_SYNC flag through on file attributes .. BB */
243 /* Also refresh inode by passing in file_info buf returned by SMBOpen
244 and calling get_inode_info with returned buf (at least helps
245 non-Unix server case) */
247 /* BB we can not do this if this is the second open of a file
248 and the first handle has writebehind data, we might be
249 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
250 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
251 if (!buf) {
252 rc = -ENOMEM;
253 goto out;
255 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
256 CREATE_NOT_DIR, &netfid, &oplock, buf,
257 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
258 & CIFS_MOUNT_MAP_SPECIAL_CHR);
259 if (rc) {
260 cFYI(1, ("cifs_open returned 0x%x ", rc));
261 goto out;
263 file->private_data =
264 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
265 if (file->private_data == NULL) {
266 rc = -ENOMEM;
267 goto out;
269 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
270 write_lock(&file->f_owner.lock);
271 write_lock(&GlobalSMBSeslock);
272 list_add(&pCifsFile->tlist, &pTcon->openFileList);
274 pCifsInode = CIFS_I(file->f_dentry->d_inode);
275 if (pCifsInode) {
276 rc = cifs_open_inode_helper(inode, file, pCifsInode,
277 pCifsFile, pTcon,
278 &oplock, buf, full_path, xid);
279 } else {
280 write_unlock(&GlobalSMBSeslock);
281 write_unlock(&file->f_owner.lock);
284 if (oplock & CIFS_CREATE_ACTION) {
285 /* time to set mode which we can not set earlier due to
286 problems creating new read-only files */
287 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
288 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
289 inode->i_mode,
290 (__u64)-1, (__u64)-1, 0 /* dev */,
291 cifs_sb->local_nls,
292 cifs_sb->mnt_cifs_flags &
293 CIFS_MOUNT_MAP_SPECIAL_CHR);
294 } else {
295 /* BB implement via Windows security descriptors eg
296 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
297 -1, -1, local_nls);
298 in the meantime could set r/o dos attribute when
299 perms are eg: mode & 0222 == 0 */
303 out:
304 kfree(buf);
305 kfree(full_path);
306 FreeXid(xid);
307 return rc;
310 /* Try to reacquire byte range locks that were released when session */
311 /* to server was lost */
312 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
314 int rc = 0;
316 /* BB list all locks open on this file and relock */
318 return rc;
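/* Reopen a file whose server handle was invalidated (eg by session
   reconnect); if can_flush is set, write back and revalidate cached
   inode data after the reopen */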
321 static int cifs_reopen_file(struct inode *inode, struct file *file,
322 int can_flush)
324 int rc = -EACCES;
325 int xid, oplock;
326 struct cifs_sb_info *cifs_sb;
327 struct cifsTconInfo *pTcon;
328 struct cifsFileInfo *pCifsFile;
329 struct cifsInodeInfo *pCifsInode;
330 char *full_path = NULL;
331 int desiredAccess;
332 int disposition = FILE_OPEN;
333 __u16 netfid;
335 if (inode == NULL)
336 return -EBADF;
337 if (file->private_data) {
338 pCifsFile = (struct cifsFileInfo *)file->private_data;
339 } else
340 return -EBADF;
342 xid = GetXid();
343 down(&pCifsFile->fh_sem);
344 if (pCifsFile->invalidHandle == FALSE) {
345 up(&pCifsFile->fh_sem);
346 FreeXid(xid);
347 return 0;
350 if (file->f_dentry == NULL) {
351 up(&pCifsFile->fh_sem);
352 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
353 FreeXid(xid);
354 return -EBADF;
356 cifs_sb = CIFS_SB(inode->i_sb);
357 pTcon = cifs_sb->tcon;
358 /* can not grab rename sem here because various ops, including
359 those that already have the rename sem can end up causing writepage
360 to get called and if the server was down that means we end up here,
361 and we can never tell if the caller already has the rename_sem */
362 full_path = build_path_from_dentry(file->f_dentry);
363 if (full_path == NULL) {
364 up(&pCifsFile->fh_sem);
365 FreeXid(xid);
366 return -ENOMEM;
369 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
370 inode, file->f_flags, full_path));
371 desiredAccess = cifs_convert_flags(file->f_flags);
373 if (oplockEnabled)
374 oplock = REQ_OPLOCK;
375 else
376 oplock = FALSE;
378 /* Can not refresh inode by passing in file_info buf to be returned
379 by SMBOpen and then calling get_inode_info with returned buf
380 since file might have write behind data that needs to be flushed
381 and server version of file size can be stale. If we knew for sure
382 that inode was not dirty locally we could do this */
384 /* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
385 if (buf == 0) {
386 up(&pCifsFile->fh_sem);
387 kfree(full_path);
388 FreeXid(xid);
389 return -ENOMEM;
390 } */
391 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
392 CREATE_NOT_DIR, &netfid, &oplock, NULL,
393 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
394 CIFS_MOUNT_MAP_SPECIAL_CHR);
395 if (rc) {
396 up(&pCifsFile->fh_sem);
397 cFYI(1, ("cifs_open returned 0x%x ", rc));
398 cFYI(1, ("oplock: %d ", oplock));
399 } else {
400 pCifsFile->netfid = netfid;
401 pCifsFile->invalidHandle = FALSE;
402 up(&pCifsFile->fh_sem);
403 pCifsInode = CIFS_I(inode);
404 if (pCifsInode) {
405 if (can_flush) {
406 filemap_fdatawrite(inode->i_mapping);
407 filemap_fdatawait(inode->i_mapping);
408 /* temporarily disable caching while we
409 go to server to get inode info */
410 pCifsInode->clientCanCacheAll = FALSE;
411 pCifsInode->clientCanCacheRead = FALSE;
412 if (pTcon->ses->capabilities & CAP_UNIX)
413 rc = cifs_get_inode_info_unix(&inode,
414 full_path, inode->i_sb, xid);
415 else
416 rc = cifs_get_inode_info(&inode,
417 full_path, NULL, inode->i_sb,
418 xid);
419 } /* else we are writing out data to server already
420 and could deadlock if we tried to flush data, and
421 since we do not know if we have data that would
422 invalidate the current end of file on the server
423 we can not go to the server to get the new inode
424 info */
425 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
426 pCifsInode->clientCanCacheAll = TRUE;
427 pCifsInode->clientCanCacheRead = TRUE;
428 cFYI(1, ("Exclusive Oplock granted on inode %p",
429 file->f_dentry->d_inode));
430 } else if ((oplock & 0xF) == OPLOCK_READ) {
431 pCifsInode->clientCanCacheRead = TRUE;
432 pCifsInode->clientCanCacheAll = FALSE;
433 } else {
434 pCifsInode->clientCanCacheRead = FALSE;
435 pCifsInode->clientCanCacheAll = FALSE;
437 cifs_relock_file(pCifsFile);
441 kfree(full_path);
442 FreeXid(xid);
443 return rc;
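/* VFS release: mark the handle close pending, send the SMB close unless
   the tcon needs reconnect, and remove it from the open file lists */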
446 int cifs_close(struct inode *inode, struct file *file)
448 int rc = 0;
449 int xid;
450 struct cifs_sb_info *cifs_sb;
451 struct cifsTconInfo *pTcon;
452 struct cifsFileInfo *pSMBFile =
453 (struct cifsFileInfo *)file->private_data;
455 xid = GetXid();
457 cifs_sb = CIFS_SB(inode->i_sb);
458 pTcon = cifs_sb->tcon;
459 if (pSMBFile) {
460 pSMBFile->closePend = TRUE;
461 write_lock(&file->f_owner.lock);
462 if (pTcon) {
463 /* no sense reconnecting to close a file that is
464 already closed */
465 if (pTcon->tidStatus != CifsNeedReconnect) {
466 write_unlock(&file->f_owner.lock);
467 rc = CIFSSMBClose(xid, pTcon,
468 pSMBFile->netfid);
469 write_lock(&file->f_owner.lock);
472 write_lock(&GlobalSMBSeslock);
473 list_del(&pSMBFile->flist);
474 list_del(&pSMBFile->tlist);
475 write_unlock(&GlobalSMBSeslock);
476 write_unlock(&file->f_owner.lock);
477 kfree(pSMBFile->search_resume_name);
478 kfree(file->private_data);
479 file->private_data = NULL;
480 } else
481 rc = -EBADF;
483 if (list_empty(&(CIFS_I(inode)->openFileList))) {
484 cFYI(1, ("closing last open instance for inode %p", inode));
485 /* if the file is not open we do not know if we can cache info
486 on this inode, much less write behind and read ahead */
487 CIFS_I(inode)->clientCanCacheRead = FALSE;
488 CIFS_I(inode)->clientCanCacheAll = FALSE;
490 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
491 rc = CIFS_I(inode)->write_behind_rc;
492 FreeXid(xid);
493 return rc;
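/* Release a directory handle: close any search still in progress on the
   server and free the cached search buffers */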
496 int cifs_closedir(struct inode *inode, struct file *file)
498 int rc = 0;
499 int xid;
500 struct cifsFileInfo *pCFileStruct =
501 (struct cifsFileInfo *)file->private_data;
502 char *ptmp;
504 cFYI(1, ("Closedir inode = 0x%p with ", inode));
506 xid = GetXid();
508 if (pCFileStruct) {
509 struct cifsTconInfo *pTcon;
510 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
512 pTcon = cifs_sb->tcon;
514 cFYI(1, ("Freeing private data in close dir"));
515 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
516 (pCFileStruct->invalidHandle == FALSE)) {
517 pCFileStruct->invalidHandle = TRUE;
518 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
519 cFYI(1, ("Closing uncompleted readdir with rc %d",
520 rc));
521 /* not much we can do if it fails anyway, ignore rc */
522 rc = 0;
524 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
525 if (ptmp) {
526 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir"));
527 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
528 cifs_buf_release(ptmp);
530 ptmp = pCFileStruct->search_resume_name;
531 if (ptmp) {
532 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir"));
533 pCFileStruct->search_resume_name = NULL;
534 kfree(ptmp);
536 kfree(file->private_data);
537 file->private_data = NULL;
539 /* BB can we lock the filestruct while this is going on? */
540 FreeXid(xid);
541 return rc;
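/* Byte-range locking via SMB LOCKING_ANDX; F_GETLK is emulated by taking
   and immediately releasing a lock over the requested range */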
544 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
546 int rc, xid;
547 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
548 __u32 numLock = 0;
549 __u32 numUnlock = 0;
550 __u64 length;
551 int wait_flag = FALSE;
552 struct cifs_sb_info *cifs_sb;
553 struct cifsTconInfo *pTcon;
555 length = 1 + pfLock->fl_end - pfLock->fl_start;
556 rc = -EACCES;
557 xid = GetXid();
559 cFYI(1, ("Lock parm: 0x%x flockflags: "
560 "0x%x flocktype: 0x%x start: %lld end: %lld",
561 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
562 pfLock->fl_end));
564 if (pfLock->fl_flags & FL_POSIX)
565 cFYI(1, ("Posix "));
566 if (pfLock->fl_flags & FL_FLOCK)
567 cFYI(1, ("Flock "));
568 if (pfLock->fl_flags & FL_SLEEP) {
569 cFYI(1, ("Blocking lock "));
570 wait_flag = TRUE;
572 if (pfLock->fl_flags & FL_ACCESS)
573 cFYI(1, ("Process suspended by mandatory locking - "
574 "not implemented yet "));
575 if (pfLock->fl_flags & FL_LEASE)
576 cFYI(1, ("Lease on file - not implemented yet"));
577 if (pfLock->fl_flags &
578 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
579 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
581 if (pfLock->fl_type == F_WRLCK) {
582 cFYI(1, ("F_WRLCK "));
583 numLock = 1;
584 } else if (pfLock->fl_type == F_UNLCK) {
585 cFYI(1, ("F_UNLCK "));
586 numUnlock = 1;
587 } else if (pfLock->fl_type == F_RDLCK) {
588 cFYI(1, ("F_RDLCK "));
589 lockType |= LOCKING_ANDX_SHARED_LOCK;
590 numLock = 1;
591 } else if (pfLock->fl_type == F_EXLCK) {
592 cFYI(1, ("F_EXLCK "));
593 numLock = 1;
594 } else if (pfLock->fl_type == F_SHLCK) {
595 cFYI(1, ("F_SHLCK "));
596 lockType |= LOCKING_ANDX_SHARED_LOCK;
597 numLock = 1;
598 } else
599 cFYI(1, ("Unknown type of lock "));
601 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
602 pTcon = cifs_sb->tcon;
604 if (file->private_data == NULL) {
605 FreeXid(xid);
606 return -EBADF;
609 if (IS_GETLK(cmd)) {
610 rc = CIFSSMBLock(xid, pTcon,
611 ((struct cifsFileInfo *)file->
612 private_data)->netfid,
613 length,
614 pfLock->fl_start, 0, 1, lockType,
615 0 /* wait flag */ );
616 if (rc == 0) {
617 rc = CIFSSMBLock(xid, pTcon,
618 ((struct cifsFileInfo *) file->
619 private_data)->netfid,
620 length,
621 pfLock->fl_start, 1 /* numUnlock */ ,
622 0 /* numLock */ , lockType,
623 0 /* wait flag */ );
624 pfLock->fl_type = F_UNLCK;
625 if (rc != 0)
626 cERROR(1, ("Error unlocking previously locked "
627 "range %d during test of lock ",
628 rc));
629 rc = 0;
631 } else {
632 /* if rc == ERR_SHARING_VIOLATION ? */
633 rc = 0; /* do not change lock type to unlock
634 since range in use */
637 FreeXid(xid);
638 return rc;
641 rc = CIFSSMBLock(xid, pTcon,
642 ((struct cifsFileInfo *) file->private_data)->
643 netfid, length,
644 pfLock->fl_start, numUnlock, numLock, lockType,
645 wait_flag);
646 if (rc == 0 && (pfLock->fl_flags & FL_POSIX))
647 posix_lock_file_wait(file, pfLock);
648 FreeXid(xid);
649 return rc;
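/* Write from a user-space buffer, looping over chunks of at most wsize
   bytes and reopening the file handle if it has been invalidated */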
652 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
653 size_t write_size, loff_t *poffset)
655 int rc = 0;
656 unsigned int bytes_written = 0;
657 unsigned int total_written;
658 struct cifs_sb_info *cifs_sb;
659 struct cifsTconInfo *pTcon;
660 int xid, long_op;
661 struct cifsFileInfo *open_file;
663 if (file->f_dentry == NULL)
664 return -EBADF;
666 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
667 if (cifs_sb == NULL)
668 return -EBADF;
670 pTcon = cifs_sb->tcon;
672 /* cFYI(1,
673 (" write %d bytes to offset %lld of %s", write_size,
674 *poffset, file->f_dentry->d_name.name)); */
676 if (file->private_data == NULL)
677 return -EBADF;
678 else
679 open_file = (struct cifsFileInfo *) file->private_data;
681 xid = GetXid();
682 if (file->f_dentry->d_inode == NULL) {
683 FreeXid(xid);
684 return -EBADF;
687 if (*poffset > file->f_dentry->d_inode->i_size)
688 long_op = 2; /* writes past end of file can take a long time */
689 else
690 long_op = 1;
692 for (total_written = 0; write_size > total_written;
693 total_written += bytes_written) {
694 rc = -EAGAIN;
695 while (rc == -EAGAIN) {
696 if (file->private_data == NULL) {
697 /* file has been closed on us */
698 FreeXid(xid);
699 /* if we have gotten here we have written some data
700 and blocked, and the file has been freed on us while
701 we blocked so return what we managed to write */
702 return total_written;
704 if (open_file->closePend) {
705 FreeXid(xid);
706 if (total_written)
707 return total_written;
708 else
709 return -EBADF;
711 if (open_file->invalidHandle) {
712 if ((file->f_dentry == NULL) ||
713 (file->f_dentry->d_inode == NULL)) {
714 FreeXid(xid);
715 return total_written;
717 /* we could deadlock if we called
718 filemap_fdatawait from here so tell
719 reopen_file not to flush data to server
720 now */
721 rc = cifs_reopen_file(file->f_dentry->d_inode,
722 file, FALSE);
723 if (rc != 0)
724 break;
727 rc = CIFSSMBWrite(xid, pTcon,
728 open_file->netfid,
729 min_t(const int, cifs_sb->wsize,
730 write_size - total_written),
731 *poffset, &bytes_written,
732 NULL, write_data + total_written, long_op);
734 if (rc || (bytes_written == 0)) {
735 if (total_written)
736 break;
737 else {
738 FreeXid(xid);
739 return rc;
741 } else
742 *poffset += bytes_written;
743 long_op = FALSE; /* subsequent writes fast -
744 15 seconds is plenty */
747 #ifdef CONFIG_CIFS_STATS
748 if (total_written > 0) {
749 atomic_inc(&pTcon->num_writes);
750 spin_lock(&pTcon->stat_lock);
751 pTcon->bytes_written += total_written;
752 spin_unlock(&pTcon->stat_lock);
754 #endif
756 /* since the write may have blocked check these pointers again */
757 if (file->f_dentry) {
758 if (file->f_dentry->d_inode) {
759 struct inode *inode = file->f_dentry->d_inode;
760 inode->i_ctime = inode->i_mtime =
761 current_fs_time(inode->i_sb);
762 if (total_written > 0) {
763 if (*poffset > file->f_dentry->d_inode->i_size)
764 i_size_write(file->f_dentry->d_inode,
765 *poffset);
767 mark_inode_dirty_sync(file->f_dentry->d_inode);
770 FreeXid(xid);
771 return total_written;
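/* Kernel-buffer variant of cifs_user_write, used by the page writeback
   and commit_write paths */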
774 static ssize_t cifs_write(struct file *file, const char *write_data,
775 size_t write_size, loff_t *poffset)
777 int rc = 0;
778 unsigned int bytes_written = 0;
779 unsigned int total_written;
780 struct cifs_sb_info *cifs_sb;
781 struct cifsTconInfo *pTcon;
782 int xid, long_op;
783 struct cifsFileInfo *open_file;
785 if (file->f_dentry == NULL)
786 return -EBADF;
788 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
789 if (cifs_sb == NULL)
790 return -EBADF;
792 pTcon = cifs_sb->tcon;
794 /* cFYI(1,
795 (" write %d bytes to offset %lld of %s", write_size,
796 *poffset, file->f_dentry->d_name.name)); */
798 if (file->private_data == NULL)
799 return -EBADF;
800 else
801 open_file = (struct cifsFileInfo *)file->private_data;
803 xid = GetXid();
804 if (file->f_dentry->d_inode == NULL) {
805 FreeXid(xid);
806 return -EBADF;
809 if (*poffset > file->f_dentry->d_inode->i_size)
810 long_op = 2; /* writes past end of file can take a long time */
811 else
812 long_op = 1;
814 for (total_written = 0; write_size > total_written;
815 total_written += bytes_written) {
816 rc = -EAGAIN;
817 while (rc == -EAGAIN) {
818 if (file->private_data == NULL) {
819 /* file has been closed on us */
820 FreeXid(xid);
821 /* if we have gotten here we have written some data
822 and blocked, and the file has been freed on us
823 while we blocked so return what we managed to
824 write */
825 return total_written;
827 if (open_file->closePend) {
828 FreeXid(xid);
829 if (total_written)
830 return total_written;
831 else
832 return -EBADF;
834 if (open_file->invalidHandle) {
835 if ((file->f_dentry == NULL) ||
836 (file->f_dentry->d_inode == NULL)) {
837 FreeXid(xid);
838 return total_written;
840 /* we could deadlock if we called
841 filemap_fdatawait from here so tell
842 reopen_file not to flush data to
843 server now */
844 rc = cifs_reopen_file(file->f_dentry->d_inode,
845 file, FALSE);
846 if (rc != 0)
847 break;
850 rc = CIFSSMBWrite(xid, pTcon,
851 open_file->netfid,
852 min_t(const int, cifs_sb->wsize,
853 write_size - total_written),
854 *poffset, &bytes_written,
855 write_data + total_written, NULL, long_op);
857 if (rc || (bytes_written == 0)) {
858 if (total_written)
859 break;
860 else {
861 FreeXid(xid);
862 return rc;
864 } else
865 *poffset += bytes_written;
866 long_op = FALSE; /* subsequent writes fast -
867 15 seconds is plenty */
870 #ifdef CONFIG_CIFS_STATS
871 if (total_written > 0) {
872 atomic_inc(&pTcon->num_writes);
873 spin_lock(&pTcon->stat_lock);
874 pTcon->bytes_written += total_written;
875 spin_unlock(&pTcon->stat_lock);
877 #endif
879 /* since the write may have blocked check these pointers again */
880 if (file->f_dentry) {
881 if (file->f_dentry->d_inode) {
882 file->f_dentry->d_inode->i_ctime =
883 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
884 if (total_written > 0) {
885 if (*poffset > file->f_dentry->d_inode->i_size)
886 i_size_write(file->f_dentry->d_inode,
887 *poffset);
889 mark_inode_dirty_sync(file->f_dentry->d_inode);
892 FreeXid(xid);
893 return total_written;
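/* Write the byte range [from, to) of a page cache page back to the
   server using any open handle on the inode that allows writing */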
896 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
898 struct address_space *mapping = page->mapping;
899 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
900 char *write_data;
901 int rc = -EFAULT;
902 int bytes_written = 0;
903 struct cifs_sb_info *cifs_sb;
904 struct cifsTconInfo *pTcon;
905 struct inode *inode;
906 struct cifsInodeInfo *cifsInode;
907 struct cifsFileInfo *open_file = NULL;
908 struct list_head *tmp;
909 struct list_head *tmp1;
911 if (!mapping || !mapping->host)
912 return -EFAULT;
914 inode = page->mapping->host;
915 cifs_sb = CIFS_SB(inode->i_sb);
916 pTcon = cifs_sb->tcon;
918 offset += (loff_t)from;
919 write_data = kmap(page);
920 write_data += from;
922 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
923 kunmap(page);
924 return -EIO;
927 /* racing with truncate? */
928 if (offset > mapping->host->i_size) {
929 kunmap(page);
930 return 0; /* don't care */
933 /* check to make sure that we are not extending the file */
934 if (mapping->host->i_size - offset < (loff_t)to)
935 to = (unsigned)(mapping->host->i_size - offset);
937 cifsInode = CIFS_I(mapping->host);
938 read_lock(&GlobalSMBSeslock);
939 /* BB we should start at the end */
940 list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
941 open_file = list_entry(tmp, struct cifsFileInfo, flist);
942 if (open_file->closePend)
943 continue;
944 /* We check if file is open for writing first */
945 if ((open_file->pfile) &&
946 ((open_file->pfile->f_flags & O_RDWR) ||
947 (open_file->pfile->f_flags & O_WRONLY))) {
948 read_unlock(&GlobalSMBSeslock);
949 bytes_written = cifs_write(open_file->pfile,
950 write_data, to-from,
951 &offset);
952 read_lock(&GlobalSMBSeslock);
953 /* Does mm or vfs already set times? */
954 inode->i_atime =
955 inode->i_mtime = current_fs_time(inode->i_sb);
956 if ((bytes_written > 0) && (offset)) {
957 rc = 0;
958 } else if (bytes_written < 0) {
959 if (rc == -EBADF) {
960 /* have seen a case in which kernel seemed to
961 have closed/freed a file even with writes
962 active so we might as well see if there are
963 other file structs to try for the same
964 inode before giving up */
965 continue;
966 } else
967 rc = bytes_written;
969 break; /* now that we found a valid file handle and
970 tried to write to it we are done, no sense
971 continuing to loop looking for another */
973 if (tmp->next == NULL) {
974 cFYI(1, ("File instance %p removed", tmp));
975 break;
978 read_unlock(&GlobalSMBSeslock);
979 if (open_file == NULL) {
980 cFYI(1, ("No writeable filehandles for inode"));
981 rc = -EIO;
984 kunmap(page);
985 return rc;
988 #if 0
989 static int cifs_writepages(struct address_space *mapping,
990 struct writeback_control *wbc)
992 int rc = -EFAULT;
993 int xid;
995 xid = GetXid();
997 /* Find contiguous pages then iterate through repeating
998 call 16K write then Setpageuptodate or if LARGE_WRITE_X
999 support then send larger writes via kevec so as to eliminate
1000 a memcpy */
1001 FreeXid(xid);
1002 return rc;
1004 #endif
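/* Write back a single dirty page by sending its full contents to the
   server via cifs_partialpagewrite */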
1006 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1008 int rc = -EFAULT;
1009 int xid;
1011 xid = GetXid();
1012 /* BB add check for wbc flags */
1013 page_cache_get(page);
1014 if (!PageUptodate(page)) {
1015 cFYI(1, ("ppw - page not up to date"));
1018 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1019 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1020 unlock_page(page);
1021 page_cache_release(page);
1022 FreeXid(xid);
1023 return rc;
1026 static int cifs_commit_write(struct file *file, struct page *page,
1027 unsigned offset, unsigned to)
1029 int xid;
1030 int rc = 0;
1031 struct inode *inode = page->mapping->host;
1032 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1033 char *page_data;
1035 xid = GetXid();
1036 cFYI(1, ("commit write for page %p up to position %lld for %d",
1037 page, position, to));
1038 if (position > inode->i_size) {
1039 i_size_write(inode, position);
1040 /* if (file->private_data == NULL) {
1041 rc = -EBADF;
1042 } else {
1043 open_file = (struct cifsFileInfo *)file->private_data;
1044 cifs_sb = CIFS_SB(inode->i_sb);
1045 rc = -EAGAIN;
1046 while (rc == -EAGAIN) {
1047 if ((open_file->invalidHandle) &&
1048 (!open_file->closePend)) {
1049 rc = cifs_reopen_file(
1050 file->f_dentry->d_inode, file);
1051 if (rc != 0)
1052 break;
1054 if (!open_file->closePend) {
1055 rc = CIFSSMBSetFileSize(xid,
1056 cifs_sb->tcon, position,
1057 open_file->netfid,
1058 open_file->pid, FALSE);
1059 } else {
1060 rc = -EBADF;
1061 break;
1064 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1065 } */
1067 if (!PageUptodate(page)) {
1068 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1069 /* can not rely on (or let) writepage write this data */
1070 if (to < offset) {
1071 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1072 offset, to));
1073 FreeXid(xid);
1074 return rc;
1076 /* this is probably better than directly calling
1077 partialpage_write since in this function the file handle is
1078 known which we might as well leverage */
1079 /* BB check if anything else missing out of ppw
1080 such as updating last write time */
1081 page_data = kmap(page);
1082 rc = cifs_write(file, page_data + offset, to-offset,
1083 &position);
1084 if (rc > 0)
1085 rc = 0;
1086 /* else if (rc < 0) should we set writebehind rc? */
1087 kunmap(page);
1088 } else {
1089 set_page_dirty(page);
1092 FreeXid(xid);
1093 return rc;
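/* fsync: write back dirty pages for this inode and clear the recorded
   write-behind error once the flush succeeds */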
1096 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1098 int xid;
1099 int rc = 0;
1100 struct inode *inode = file->f_dentry->d_inode;
1102 xid = GetXid();
1104 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1105 dentry->d_name.name, datasync));
1107 rc = filemap_fdatawrite(inode->i_mapping);
1108 if (rc == 0)
1109 CIFS_I(inode)->write_behind_rc = 0;
1110 FreeXid(xid);
1111 return rc;
1114 /* static int cifs_sync_page(struct page *page)
1116 struct address_space *mapping;
1117 struct inode *inode;
1118 unsigned long index = page->index;
1119 unsigned int rpages = 0;
1120 int rc = 0;
1122 cFYI(1, ("sync page %p",page));
1123 mapping = page->mapping;
1124 if (!mapping)
1125 return 0;
1126 inode = mapping->host;
1127 if (!inode)
1128 return 0; */
1130 /* fill in rpages then
1131 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1133 /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1135 if (rc < 0)
1136 return rc;
1137 return 0;
1138 } */
1141 * As file closes, flush all cached write data for this inode checking
1142 * for write behind errors.
1144 int cifs_flush(struct file *file)
1146 struct inode *inode = file->f_dentry->d_inode;
1147 int rc = 0;
1149 /* Rather than do the steps manually:
1150 lock the inode for writing
1151 loop through pages looking for write behind data (dirty pages)
1152 coalesce into contiguous 16K (or smaller) chunks to write to server
1153 send to server (prefer in parallel)
1154 deal with writebehind errors
1155 unlock inode for writing
1156 filemapfdatawrite appears easier for the time being */
1158 rc = filemap_fdatawrite(inode->i_mapping);
1159 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1160 CIFS_I(inode)->write_behind_rc = 0;
1162 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1164 return rc;
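/* Read into a user-space buffer in chunks of at most rsize bytes,
   copying data out of the SMB response buffer with copy_to_user */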
1167 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1168 size_t read_size, loff_t *poffset)
1170 int rc = -EACCES;
1171 unsigned int bytes_read = 0;
1172 unsigned int total_read = 0;
1173 unsigned int current_read_size;
1174 struct cifs_sb_info *cifs_sb;
1175 struct cifsTconInfo *pTcon;
1176 int xid;
1177 struct cifsFileInfo *open_file;
1178 char *smb_read_data;
1179 char __user *current_offset;
1180 struct smb_com_read_rsp *pSMBr;
1182 xid = GetXid();
1183 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1184 pTcon = cifs_sb->tcon;
1186 if (file->private_data == NULL) {
1187 FreeXid(xid);
1188 return -EBADF;
1190 open_file = (struct cifsFileInfo *)file->private_data;
1192 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1193 cFYI(1, ("attempting read on write only file instance"));
1195 for (total_read = 0, current_offset = read_data;
1196 read_size > total_read;
1197 total_read += bytes_read, current_offset += bytes_read) {
1198 current_read_size = min_t(const int, read_size - total_read,
1199 cifs_sb->rsize);
1200 rc = -EAGAIN;
1201 smb_read_data = NULL;
1202 while (rc == -EAGAIN) {
1203 if ((open_file->invalidHandle) &&
1204 (!open_file->closePend)) {
1205 rc = cifs_reopen_file(file->f_dentry->d_inode,
1206 file, TRUE);
1207 if (rc != 0)
1208 break;
1211 rc = CIFSSMBRead(xid, pTcon,
1212 open_file->netfid,
1213 current_read_size, *poffset,
1214 &bytes_read, &smb_read_data);
1216 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1217 if (copy_to_user(current_offset,
1218 smb_read_data + 4 /* RFC1001 hdr */
1219 + le16_to_cpu(pSMBr->DataOffset),
1220 bytes_read)) {
1221 rc = -EFAULT;
1222 FreeXid(xid);
1223 return rc;
1225 if (smb_read_data) {
1226 cifs_buf_release(smb_read_data);
1227 smb_read_data = NULL;
1230 if (rc || (bytes_read == 0)) {
1231 if (total_read) {
1232 break;
1233 } else {
1234 FreeXid(xid);
1235 return rc;
1237 } else {
1238 #ifdef CONFIG_CIFS_STATS
1239 atomic_inc(&pTcon->num_reads);
1240 spin_lock(&pTcon->stat_lock);
1241 pTcon->bytes_read += total_read;
1242 spin_unlock(&pTcon->stat_lock);
1243 #endif
1244 *poffset += bytes_read;
1247 FreeXid(xid);
1248 return total_read;
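/* Kernel-buffer read variant used by cifs_readpage_worker */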
1252 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1253 loff_t *poffset)
1255 int rc = -EACCES;
1256 unsigned int bytes_read = 0;
1257 unsigned int total_read;
1258 unsigned int current_read_size;
1259 struct cifs_sb_info *cifs_sb;
1260 struct cifsTconInfo *pTcon;
1261 int xid;
1262 char *current_offset;
1263 struct cifsFileInfo *open_file;
1265 xid = GetXid();
1266 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1267 pTcon = cifs_sb->tcon;
1269 if (file->private_data == NULL) {
1270 FreeXid(xid);
1271 return -EBADF;
1273 open_file = (struct cifsFileInfo *)file->private_data;
1275 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1276 cFYI(1, ("attempting read on write only file instance"));
1278 for (total_read = 0, current_offset = read_data;
1279 read_size > total_read;
1280 total_read += bytes_read, current_offset += bytes_read) {
1281 current_read_size = min_t(const int, read_size - total_read,
1282 cifs_sb->rsize);
1283 rc = -EAGAIN;
1284 while (rc == -EAGAIN) {
1285 if ((open_file->invalidHandle) &&
1286 (!open_file->closePend)) {
1287 rc = cifs_reopen_file(file->f_dentry->d_inode,
1288 file, TRUE);
1289 if (rc != 0)
1290 break;
1293 rc = CIFSSMBRead(xid, pTcon,
1294 open_file->netfid,
1295 current_read_size, *poffset,
1296 &bytes_read, &current_offset);
1298 if (rc || (bytes_read == 0)) {
1299 if (total_read) {
1300 break;
1301 } else {
1302 FreeXid(xid);
1303 return rc;
1305 } else {
1306 #ifdef CONFIG_CIFS_STATS
1307 atomic_inc(&pTcon->num_reads);
1308 spin_lock(&pTcon->stat_lock);
1309 pTcon->bytes_read += total_read;
1310 spin_unlock(&pTcon->stat_lock);
1311 #endif
1312 *poffset += bytes_read;
1315 FreeXid(xid);
1316 return total_read;
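/* mmap: revalidate cached inode data before handing off to
   generic_file_mmap */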
1319 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1321 struct dentry *dentry = file->f_dentry;
1322 int rc, xid;
1324 xid = GetXid();
1325 rc = cifs_revalidate(dentry);
1326 if (rc) {
1327 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1328 FreeXid(xid);
1329 return rc;
1331 rc = generic_file_mmap(file, vma);
1332 FreeXid(xid);
1333 return rc;
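/* Copy data returned by a large read into the readahead pages, adding
   each page to the page cache and zero-filling a trailing partial page */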
1337 static void cifs_copy_cache_pages(struct address_space *mapping,
1338 struct list_head *pages, int bytes_read, char *data,
1339 struct pagevec *plru_pvec)
1341 struct page *page;
1342 char *target;
1344 while (bytes_read > 0) {
1345 if (list_empty(pages))
1346 break;
1348 page = list_entry(pages->prev, struct page, lru);
1349 list_del(&page->lru);
1351 if (add_to_page_cache(page, mapping, page->index,
1352 GFP_KERNEL)) {
1353 page_cache_release(page);
1354 cFYI(1, ("Add page cache failed"));
1355 data += PAGE_CACHE_SIZE;
1356 bytes_read -= PAGE_CACHE_SIZE;
1357 continue;
1360 target = kmap_atomic(page, KM_USER0);
1362 if (PAGE_CACHE_SIZE > bytes_read) {
1363 memcpy(target, data, bytes_read);
1364 /* zero the tail end of this partial page */
1365 memset(target + bytes_read, 0,
1366 PAGE_CACHE_SIZE - bytes_read);
1367 bytes_read = 0;
1368 } else {
1369 memcpy(target, data, PAGE_CACHE_SIZE);
1370 bytes_read -= PAGE_CACHE_SIZE;
1372 kunmap_atomic(target, KM_USER0);
1374 flush_dcache_page(page);
1375 SetPageUptodate(page);
1376 unlock_page(page);
1377 if (!pagevec_add(plru_pvec, page))
1378 __pagevec_lru_add(plru_pvec);
1379 data += PAGE_CACHE_SIZE;
1381 return;
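/* Readahead: batch contiguous pages from the request list into server
   reads of up to rsize bytes and copy the results into the page cache */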
1384 static int cifs_readpages(struct file *file, struct address_space *mapping,
1385 struct list_head *page_list, unsigned num_pages)
1387 int rc = -EACCES;
1388 int xid;
1389 loff_t offset;
1390 struct page *page;
1391 struct cifs_sb_info *cifs_sb;
1392 struct cifsTconInfo *pTcon;
1393 int bytes_read = 0;
1394 unsigned int read_size, i;
1395 char *smb_read_data = NULL;
1396 struct smb_com_read_rsp *pSMBr;
1397 struct pagevec lru_pvec;
1398 struct cifsFileInfo *open_file;
1400 xid = GetXid();
1401 if (file->private_data == NULL) {
1402 FreeXid(xid);
1403 return -EBADF;
1405 open_file = (struct cifsFileInfo *)file->private_data;
1406 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1407 pTcon = cifs_sb->tcon;
1409 pagevec_init(&lru_pvec, 0);
1411 for (i = 0; i < num_pages; ) {
1412 unsigned contig_pages;
1413 struct page *tmp_page;
1414 unsigned long expected_index;
1416 if (list_empty(page_list))
1417 break;
1419 page = list_entry(page_list->prev, struct page, lru);
1420 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1422 /* count adjacent pages that we will read into */
1423 contig_pages = 0;
1424 expected_index =
1425 list_entry(page_list->prev, struct page, lru)->index;
1426 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1427 if (tmp_page->index == expected_index) {
1428 contig_pages++;
1429 expected_index++;
1430 } else
1431 break;
1433 if (contig_pages + i > num_pages)
1434 contig_pages = num_pages - i;
1436 /* for reads over a certain size could initiate async
1437 read ahead */
1439 read_size = contig_pages * PAGE_CACHE_SIZE;
1440 /* Read size needs to be in multiples of one page */
1441 read_size = min_t(const unsigned int, read_size,
1442 cifs_sb->rsize & PAGE_CACHE_MASK);
1444 rc = -EAGAIN;
1445 while (rc == -EAGAIN) {
1446 if ((open_file->invalidHandle) &&
1447 (!open_file->closePend)) {
1448 rc = cifs_reopen_file(file->f_dentry->d_inode,
1449 file, TRUE);
1450 if (rc != 0)
1451 break;
1454 rc = CIFSSMBRead(xid, pTcon,
1455 open_file->netfid,
1456 read_size, offset,
1457 &bytes_read, &smb_read_data);
1458 /* BB need to check return code here */
1459 if (rc == -EAGAIN) {
1460 if (smb_read_data) {
1461 cifs_buf_release(smb_read_data);
1462 smb_read_data = NULL;
1466 if ((rc < 0) || (smb_read_data == NULL)) {
1467 cFYI(1, ("Read error in readpages: %d", rc));
1468 /* clean up remaining pages off list */
1469 while (!list_empty(page_list) && (i < num_pages)) {
1470 page = list_entry(page_list->prev, struct page,
1471 lru);
1472 list_del(&page->lru);
1473 page_cache_release(page);
1475 break;
1476 } else if (bytes_read > 0) {
1477 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1478 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1479 smb_read_data + 4 /* RFC1001 hdr */ +
1480 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1482 i += bytes_read >> PAGE_CACHE_SHIFT;
1483 #ifdef CONFIG_CIFS_STATS
1484 atomic_inc(&pTcon->num_reads);
1485 spin_lock(&pTcon->stat_lock);
1486 pTcon->bytes_read += bytes_read;
1487 spin_unlock(&pTcon->stat_lock);
1488 #endif
1489 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1490 i++; /* account for partial page */
1492 /* server copy of file can have smaller size
1493 than client */
1494 /* BB do we need to verify this common case ?
1495 this case is ok - if we are at server EOF
1496 we will hit it on next read */
1498 /* while (!list_empty(page_list) && (i < num_pages)) {
1499 page = list_entry(page_list->prev,
1500 struct page, list);
1501 list_del(&page->list);
1502 page_cache_release(page);
1504 break; */
1506 } else {
1507 cFYI(1, ("No bytes read (%d) at offset %lld . "
1508 "Cleaning remaining pages from readahead list",
1509 bytes_read, offset));
1510 /* BB turn off caching and do new lookup on
1511 file size at server? */
1512 while (!list_empty(page_list) && (i < num_pages)) {
1513 page = list_entry(page_list->prev, struct page,
1514 lru);
1515 list_del(&page->lru);
1517 /* BB removeme - replace with zero of page? */
1518 page_cache_release(page);
1520 break;
1522 if (smb_read_data) {
1523 cifs_buf_release(smb_read_data);
1524 smb_read_data = NULL;
1526 bytes_read = 0;
1529 pagevec_lru_add(&lru_pvec);
1531 /* need to free smb_read_data buf before exit */
1532 if (smb_read_data) {
1533 cifs_buf_release(smb_read_data);
1534 smb_read_data = NULL;
1537 FreeXid(xid);
1538 return rc;
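/* Fill a single page via cifs_read, zero the tail of a short read, and
   mark the page up to date */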
1541 static int cifs_readpage_worker(struct file *file, struct page *page,
1542 loff_t *poffset)
1544 char *read_data;
1545 int rc;
1547 page_cache_get(page);
1548 read_data = kmap(page);
1549 /* for reads over a certain size could initiate async read ahead */
1551 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1553 if (rc < 0)
1554 goto io_error;
1555 else
1556 cFYI(1, ("Bytes read %d ",rc));
1558 file->f_dentry->d_inode->i_atime =
1559 current_fs_time(file->f_dentry->d_inode->i_sb);
1561 if (PAGE_CACHE_SIZE > rc)
1562 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1564 flush_dcache_page(page);
1565 SetPageUptodate(page);
1566 rc = 0;
1568 io_error:
1569 kunmap(page);
1570 page_cache_release(page);
1571 return rc;
1574 static int cifs_readpage(struct file *file, struct page *page)
1576 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1577 int rc = -EACCES;
1578 int xid;
1580 xid = GetXid();
1582 if (file->private_data == NULL) {
1583 FreeXid(xid);
1584 return -EBADF;
1587 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1588 page, (int)offset, (int)offset));
1590 rc = cifs_readpage_worker(file, page, &offset);
1592 unlock_page(page);
1594 FreeXid(xid);
1595 return rc;
1598 /* We do not want to update the file size from server for inodes
1599 open for write - to avoid races with writepage extending
1600 the file - in the future we could consider allowing
1601 refreshing the inode only on increases in the file size
1602 but this is tricky to do without racing with writebehind
1603 page caching in the current Linux kernel design */
1604 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1606 struct list_head *tmp;
1607 struct list_head *tmp1;
1608 struct cifsFileInfo *open_file = NULL;
1609 int rc = TRUE;
1611 if (cifsInode == NULL)
1612 return rc;
1614 read_lock(&GlobalSMBSeslock);
1615 list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
1616 open_file = list_entry(tmp, struct cifsFileInfo, flist);
1617 if (open_file == NULL)
1618 break;
1619 if (open_file->closePend)
1620 continue;
1621 /* We check if file is open for writing,
1622 BB we could supplement this with a check to see if file size
1623 changes have been flushed to server - ie inode metadata dirty */
1624 if ((open_file->pfile) &&
1625 ((open_file->pfile->f_flags & O_RDWR) ||
1626 (open_file->pfile->f_flags & O_WRONLY))) {
1627 rc = FALSE;
1628 break;
1630 if (tmp->next == NULL) {
1631 cFYI(1, ("File instance %p removed", tmp));
1632 break;
1635 read_unlock(&GlobalSMBSeslock);
1636 return rc;
1640 static int cifs_prepare_write(struct file *file, struct page *page,
1641 unsigned from, unsigned to)
1643 int rc = 0;
1644 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1645 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1646 if (!PageUptodate(page)) {
1647 /* if (to - from != PAGE_CACHE_SIZE) {
1648 void *kaddr = kmap_atomic(page, KM_USER0);
1649 memset(kaddr, 0, from);
1650 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1651 flush_dcache_page(page);
1652 kunmap_atomic(kaddr, KM_USER0);
1653 } */
1654 /* If we are writing a full page it will be up to date,
1655 no need to read from the server */
1656 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1657 SetPageUptodate(page);
1659 /* might as well read a page, it is fast enough */
1660 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1661 rc = cifs_readpage_worker(file, page, &offset);
1662 } else {
1663 /* should we try using another file handle if there is one -
1664 how would we lock it to prevent close of that handle
1665 racing with this read?
1666 In any case this will be written out by commit_write */
1670 /* BB should we pass any errors back?
1671 e.g. if we do not have read access to the file */
1672 return 0;
1675 struct address_space_operations cifs_addr_ops = {
1676 .readpage = cifs_readpage,
1677 .readpages = cifs_readpages,
1678 .writepage = cifs_writepage,
1679 .prepare_write = cifs_prepare_write,
1680 .commit_write = cifs_commit_write,
1681 .set_page_dirty = __set_page_dirty_nobuffers,
1682 /* .sync_page = cifs_sync_page, */
1683 /* .direct_IO = */