/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_common_ops;
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;

static struct kmem_cache *nfs_rdata_cachep;
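/*
 * Read-side allocation scheme, in brief: every nfs_read_header embeds
 * one preallocated nfs_read_data (rpc_data), so the common case of a
 * single READ rpc per header needs no extra allocation; further
 * nfs_read_data are kzalloc'ed only when the I/O must be split across
 * several rpcs.  hdr->refcnt counts the outstanding nfs_read_data plus
 * the submission path itself, and the completion op fires once it
 * drops to zero (see nfs_readdata_release() below).
 */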
struct nfs_read_header *nfs_readhdr_alloc(void)
{
	struct nfs_read_header *rhdr;

	rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
	if (rhdr) {
		struct nfs_pgio_header *hdr = &rhdr->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
	}
	return rhdr;
}
EXPORT_SYMBOL_GPL(nfs_readhdr_alloc);
static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
						unsigned int pagecount)
{
	struct nfs_read_data *data, *prealloc;

	/* reuse the preallocated rpc_data if it is still free */
	prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}
void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);

	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
EXPORT_SYMBOL_GPL(nfs_readhdr_free);
void nfs_readdata_release(struct nfs_read_data *rdata)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);

	put_nfs_open_context(rdata->args.context);
	if (rdata->pages.pagevec != rdata->pages.page_array)
		kfree(rdata->pages.pagevec);
	if (rdata == &read_header->rpc_data) {
		/* the embedded rpc_data is not freed, just marked unused */
		rdata->header = NULL;
		rdata = NULL;
	}
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
	kfree(rdata);
}
EXPORT_SYMBOL_GPL(nfs_readdata_release);
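/*
 * A page that lies wholly beyond the cached EOF never goes on the
 * wire: it is simply zeroed and marked up to date, since the server
 * would have returned zeroes anyway.
 */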
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
			NFS_SERVER(inode)->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_read_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
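/*
 * Asynchronous single-page read: build one nfs_page request, feed it
 * through a pageio descriptor, and fire the resulting rpc(s).
 * Roughly, when the generic MDS ops are in use (the exact chain
 * depends on which pg_ops read_pageio_init installs):
 *
 *	nfs_readpage_async()
 *	  -> nfs_pageio_add_request() / nfs_pageio_complete()
 *	    -> nfs_generic_pg_readpages() -> nfs_do_multiple_reads()
 */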
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	return 0;
}
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}
/* Note io was page aligned */
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* zero out whatever lies beyond the last good byte */
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				SetPageUptodate(page);
		} else
			SetPageUptodate(page);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
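/*
 * Hand one nfs_read_data off to the rpc layer.  The task runs
 * asynchronously on the nfsiod workqueue; swap-backed files add
 * NFS_RPC_SWAPFLAGS so that the rpc layer can keep making progress
 * under memory pressure.
 */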
int nfs_initiate_read(struct rpc_clnt *clnt,
		      struct nfs_read_data *data,
		      const struct rpc_call_ops *call_ops, int flags)
{
	struct inode *inode = data->header->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags | flags,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
			"offset %llu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			data->args.count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct nfs_page *req = data->header->req;

	data->args.fh     = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pages.pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);
}
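/*
 * Example (illustrative numbers): for a request starting at file
 * offset 8192 that is split with offset = 1024 and count = 1024, the
 * rpc reads bytes [9216, 10240) of the file, and the reply data lands
 * at wb_pgbase + 1024 within the supplied page vector.
 */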
static int nfs_do_read(struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->header->inode;

	return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}
static int
nfs_do_multiple_reads(struct list_head *head,
		const struct rpc_call_ops *call_ops)
{
	struct nfs_read_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_read(data, call_ops);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};
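/*
 * If allocation fails part-way through building the rpc list, undo
 * everything: NFS_IOHDR_REDO tells the completion path to skip the
 * pages, the already-queued nfs_read_data are released, and the
 * remaining requests on pg_list are failed via the error_cleanup op.
 */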
static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	while (!list_empty(&hdr->rpc_list)) {
		struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
				struct nfs_read_data, list);
		list_del(&data->list);
		nfs_readdata_release(data);
	}
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
			    struct nfs_pgio_header *hdr)
{
	struct nfs_page *req = hdr->req;
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = desc->pg_bsize, nbytes;
	unsigned int offset;

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(hdr, 1);
		if (!data) {
			nfs_pagein_error(desc, hdr);
			return -ENOMEM;
		}
		data->pages.pagevec[0] = page;
		nfs_read_rpcsetup(data, len, offset);
		list_add(&data->list, &hdr->rpc_list);
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);

	nfs_list_remove_request(req);
	nfs_list_add_request(req, &hdr->pages);
	desc->pg_rpc_callops = &nfs_read_common_ops;
	return 0;
}
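/*
 * Worked example (illustrative): with rsize = 1024 and a 4096-byte
 * page, nfs_pagein_multi() queues four rpcs at offsets 0, 1024, 2048
 * and 3072, each reading 1024 bytes into the same page.
 */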
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	struct list_head *head = &desc->pg_list;

	data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
							  desc->pg_count));
	if (!data) {
		nfs_pagein_error(desc, hdr);
		return -ENOMEM;
	}

	pages = data->pages.pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);
		*pages++ = req->wb_page;
	}

	nfs_read_rpcsetup(data, desc->pg_count, 0);
	list_add(&data->list, &hdr->rpc_list);
	desc->pg_rpc_callops = &nfs_read_common_ops;
	return 0;
}
int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(desc, hdr);
	return nfs_pagein_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_pagein);
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret == 0)
		ret = nfs_do_multiple_reads(&hdr->rpc_list,
					    desc->pg_rpc_callops);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
static const struct nfs_pageio_ops nfs_pageio_read_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_readpages,
};
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	struct inode *inode = data->header->inode;
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	/* This is a short read! */
	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(data->header, -EIO, argp->offset);
		return;
	}
	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
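/*
 * Short-read arithmetic, by way of example: if 16384 bytes were
 * requested at offset 0 and the server returned only 4096, the
 * restarted call asks for the remaining 12288 bytes at offset 4096,
 * with pgbase advanced by the same amount.
 */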
static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_pgio_header *hdr = data->header;

	/* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
	else if (data->res.eof) {
		loff_t bound;

		bound = data->args.offset + data->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (data->res.count != data->args.count)
		nfs_readpage_retry(task, data);
}
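/*
 * On eof, good_bytes is clamped to the last byte the server actually
 * returned: e.g. with io_start = 0 and good_bytes = 16384, hitting eof
 * after 10000 bytes leaves good_bytes = 10000, and nfs_read_completion()
 * above zero-fills the tail of the affected pages.
 */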
static void nfs_readpage_release_common(void *calldata)
{
	nfs_readdata_release(calldata);
}
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
}
static const struct rpc_call_ops nfs_read_common_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_readpage_result_common,
	.rpc_release = nfs_readpage_release_common,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}