/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
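/*
 * A minimal userspace sketch (illustrative only, not part of this file)
 * of how an application requests the uncached I/O described above; the
 * path and sizes here are assumptions for the example:
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	// aligned buffer
 *	read(fd, buf, 4096);	// goes straight to the NFS server
 *	write(fd, buf, 4096);	// returns only after stable storage
 *
 * Reads and writes on such a descriptor bypass the client's page cache
 * and are serviced by the routines in this file.
 */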
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS
static kmem_cache_t *nfs_direct_cachep;
/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct list_head	list,		/* nfs_read/write_data structs */
				rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */
	unsigned long		user_addr;	/* location of user's buffer */
	size_t			user_count;	/* total bytes to move */
	loff_t			pos;		/* starting offset in file */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */

	/* completion state */
	spinlock_t		lock;		/* protect completion state */
	int			outstanding;	/* i/os we're waiting for */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *dentry = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			dentry->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}
static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;
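	/*
	 * Worked example (illustrative numbers, assuming 4KB pages):
	 * for user_addr = 0x1800 and size = 0x2000, the sums above give
	 * (0x1800 + 0x2000 + 0xfff) >> 12 = 4 and 0x1800 >> 12 = 1, so
	 * page_count = 3: the buffer spans pages 1, 2, and 3 even though
	 * its length is only two pages' worth of bytes.
	 */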
	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		if (result != page_count) {
			/*
			 * If we got fewer pages than expected from
			 * get_user_pages(), the user buffer runs off the
			 * end of a mapping; return EFAULT.
			 */
			if (result >= 0) {
				nfs_free_user_pages(*pages, result, 0);
				result = -EFAULT;
			} else
				kfree(*pages);
			*pages = NULL;
		}
	}
	return result;
}
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->list);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
	dreq->outstanding = 0;
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_interruptible(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	nfs_free_user_pages(dreq->pages, dreq->npages, 1);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	} else
		complete_all(&dreq->completion);

	kref_put(&dreq->kref, nfs_direct_req_release);
}
/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	return dreq;
}
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);
	nfs_direct_complete(dreq);
}
static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};
/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;
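	/*
	 * Illustrative example (numbers assumed, not from the original
	 * source): with 4KB pages, rsize = 32KB, user_addr = 0x10400,
	 * and count = 100KB, the loop below issues four READs of 32KB,
	 * 32KB, 32KB, and 4KB.  pgbase starts at 0x400; after each full
	 * chunk, pgbase += 0x8000 makes curpage advance by 8 pages and
	 * pgbase &= ~PAGE_MASK restores the 0x400 offset, so every
	 * RPC's page vector begins at the page holding its first byte.
	 */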
	do {
		struct nfs_read_data *data;
		size_t bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		BUG_ON(list_empty(list));
		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
	BUG_ON(!list_empty(list));
}
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	list_splice_init(&dreq->rewrite_list, &dreq->list);
	while (!list_empty(&dreq->list)) {
		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct list_head *pos;

	list_splice_init(&dreq->rewrite_list, &dreq->list);
	list_for_each(pos, &dreq->list)
		dreq->outstanding++;
	dreq->count = 0;

	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
}
static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dreq->error = task->tk_status;
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}
	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_write_data *data = dreq->commit_data;
	struct rpc_task *task = &data->task;

	data->inode = dreq->inode;
	data->cred = dreq->ctx->cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = dreq->pos;
	data->args.count = dreq->user_count;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
				&nfs_commit_direct_ops, data);
	NFS_PROTO(data->inode)->commit_setup(data, 0);

	data->task.tk_priority = RPC_PRIORITY_NORMAL;
	data->task.tk_cookie = (unsigned long)data->inode;
	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", task->tk_pid);

	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_end_data_update(inode);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_direct_complete(dreq);
	}
}
static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc(0);
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_end_data_update(inode);
	nfs_direct_free_writedata(dreq);
	nfs_direct_complete(dreq);
}
#endif
static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_write_data *data = nfs_writedata_alloc(wpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_write_data, pages);
				list_del(&data->pages);
				nfs_writedata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}

	nfs_alloc_commit_data(dreq);

	kref_get(&dreq->kref);
	return dreq;
}
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
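		/*
		 * The reply was unstable, so a COMMIT will be needed.
		 * Remember the verifier from the first unstable reply;
		 * if a later reply carries a different verifier, the
		 * server rebooted between WRITEs and everything must
		 * be resent as stable writes.
		 */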
		switch (dreq->flags) {
			case 0:
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
	/* In case we have to resend */
	data->args.stable = NFS_FILE_SYNC;

	spin_unlock(&dreq->lock);
}
/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	spin_lock(&dreq->lock);
	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}
	spin_unlock(&dreq->lock);

	nfs_direct_write_complete(dreq, data->inode);
}
static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};
/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 */
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;
	do {
		struct nfs_write_data *data;
		size_t bytes;

		bytes = wsize;
		if (count < wsize)
			bytes = count;

		BUG_ON(list_empty(list));
		data = list_entry(list->next, struct nfs_write_data, pages);
		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, sync);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
	BUG_ON(!list_empty(list));
}
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;

	dreq = nfs_direct_write_alloc(count, wsize);
	if (!dreq)
		return -ENOMEM;
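	/*
	 * If no commit buffer could be allocated, or the whole request
	 * fits in a single WRITE RPC, skip the commit phase: one stable
	 * (FLUSH_STABLE) write is cheaper than a write plus a COMMIT.
	 */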
	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_write_schedule(dreq, sync);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (retval < 0)
		goto out;
	page_count = retval;

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
						pages, page_count);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
						count, &pages);
	if (retval < 0)
		goto out;
	page_count = retval;

	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
					pos, pages, page_count);

	/*
	 * XXX: nfs_end_data_update() already ensures this file's
	 *      cached data is subsequently invalidated.  Do we really
	 *      need to call invalidate_inode_pages2() again here?
	 *
	 *      For aio writes, this invalidation will almost certainly
	 *      occur before the writes complete.  Kind of racey.
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}
/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}