/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 */
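/*
 * For illustration only: a sketch of the user-space view of this code
 * path.  The mount point and sizes below are invented for the example,
 * and error handling is omitted.  The read() is served by
 * nfs_file_direct_read(); as noted above, the client does not correct
 * unaligned requests, it simply passes the requested bytes through.
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void *buf;
 *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *	posix_memalign(&buf, 4096, 65536);
 *	ssize_t n = read(fd, buf, 65536);
 */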
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#define NFSDBG_FACILITY		NFSDBG_VFS
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);

static kmem_cache_t *nfs_direct_cachep;
/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	struct list_head	list;		/* nfs_read/write_data structs */
	struct file *		filp;		/* file descriptor */
	struct kiocb *		iocb;		/* controlling i/o request */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of i/o */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */

	/* completion state */
	spinlock_t		lock;		/* protect completion state */
	int			outstanding;	/* i/os we're waiting for */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
};
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *dentry = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			dentry->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}
static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* round up to the number of pages the buffer touches */
	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		/*
		 * If we got fewer pages than expected from get_user_pages(),
		 * the user buffer runs off the end of a mapping; return EFAULT.
		 */
		if (result >= 0 && result < page_count) {
			nfs_free_user_pages(*pages, result, 0);
			*pages = NULL;
			result = -EFAULT;
		}
	}
	return result;
}
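/*
 * Worked example of the page_count arithmetic above, assuming a
 * PAGE_SIZE of 4096 (the addresses are invented): a 10000-byte buffer
 * starting at user address 0x10ff0 ends at 0x136ff, so
 *
 *	page_count = ((0x10ff0 + 10000 + 4095) >> 12) - (0x10ff0 >> 12)
 *		   = 0x14 - 0x10
 *		   = 4
 *
 * i.e. the buffer touches four pages even though it is shorter than
 * three pages, because it is not page-aligned at either end.
 */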
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	dreq->iocb = NULL;
	spin_lock_init(&dreq->lock);
	dreq->outstanding = 0;
	dreq->count = 0;
	dreq->error = 0;

	return dreq;
}
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_event_interruptible(dreq->wait, (dreq->outstanding == 0));

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	nfs_free_user_pages(dreq->pages, dreq->npages, 1);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	} else
		wake_up(&dreq->wait);

	kref_put(&dreq->kref, nfs_direct_req_release);
}
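/*
 * To make the two completion paths concrete (a hypothetical caller,
 * not code from this file): a synchronous read() blocks in
 * nfs_direct_wait() until dreq->outstanding reaches zero and is woken
 * by the wake_up() above, while an aio_read() returns -EIOCBQUEUED
 * from nfs_direct_wait() immediately and receives its result later
 * via the aio_complete() call above.
 */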
/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	return dreq;
}
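/*
 * For example (illustrative numbers only): a 100000-byte direct read
 * against a server with rsize == 32768 allocates
 *
 *	ceil(100000 / 32768) = 4
 *
 * nfs_read_data structs, so dreq->outstanding is 4 before any RPC is
 * dispatched: three full 32768-byte requests plus one 1696-byte tail.
 */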
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);
	nfs_direct_complete(dreq);
}
static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};
/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
							file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		size_t bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}
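/*
 * Example of the buffer walk above (invented values): with a user
 * buffer starting 0x200 bytes into a page, 4096-byte pages, and
 * rsize == 32768, the first READ uses pgbase 0x200 and pages[0..8].
 * Then pgbase grows to 0x8200, curpage advances by 0x8200 >> 12 == 8,
 * and pgbase is masked back to 0x200: the next chunk starts eight
 * pages further on, at the same offset within its first page.
 */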
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, user_addr, count, pos);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_write_data *data = nfs_writedata_alloc(wpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_write_data, pages);
				list_del(&data->pages);
				nfs_writedata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	kref_get(&dreq->kref);
	return dreq;
}
/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 */
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;
	/* If the server fell back to an UNSTABLE write, it's an error. */
	if (unlikely(data->res.verf->committed != NFS_FILE_SYNC))
		status = -EIO;

	spin_lock(&dreq->lock);

	if (likely(status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = status;

	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);

	nfs_end_data_update(data->inode);
	nfs_direct_complete(dreq);
}
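/*
 * Why the committed check above matters (a hypothetical exchange):
 * the scheduling path below asks for stable writes via FLUSH_STABLE,
 * but a server short on resources may still reply with committed ==
 * NFS_UNSTABLE.  Since this path never sends a follow-up COMMIT,
 * accepting such a reply could silently lose data across a server
 * reboot after the write call had already reported success, so it is
 * treated as -EIO.
 */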
static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_writedata_release,
};
/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 *
 * XXX: For now, support only FILE_SYNC writes.  Later we may add
 *      support for UNSTABLE + COMMIT.
 */
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
							file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_write_data *data;
		size_t bytes;

		bytes = wsize;
		if (count < wsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_write_alloc(count, NFS_SERVER(inode)->wsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_write_schedule(dreq, user_addr, count, pos);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
						pages, page_count);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
					pos, pages, page_count);

	/*
	 * XXX: nfs_end_data_update() already ensures this file's
	 *      cached data is subsequently invalidated.  Do we really
	 *      need to call invalidate_inode_pages2() again here?
	 *
	 *      For aio writes, this invalidation will almost certainly
	 *      occur before the writes complete.  Kind of racey.
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}