/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ	(32)
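
/*
 * Allocate a read data structure from the mempool.  Small requests use
 * the embedded page_array; larger ones get a separately allocated page
 * vector, and the structure goes back to the mempool if that fails.
 */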
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
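
/*
 * Free a read data structure: release a separately allocated page
 * vector, if any, then return the structure to the mempool.
 */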
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}
void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}
static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
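
/*
 * Zero any part of the requested range that the server did not return
 * because it lies beyond the end of file, so that stale page contents
 * are never exposed to userspace.
 */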
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
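
/*
 * Queue an asynchronous read of a single page, splitting it into
 * several requests if the server's rsize is smaller than a page.
 */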
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}
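
/*
 * Release a completed read request: push uptodate data into fscache,
 * unlock the page and drop our reference to the request.
 */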
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->path.dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Set up the NFS read request struct
 */
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp	= &data->args,
		.rpc_resp	= &data->res,
		.rpc_cred	= req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task		= &data->task,
		.rpc_client	= NFS_CLIENT(inode),
		.rpc_message	= &msg,
		.callback_ops	= call_ops,
		.callback_data	= data,
		.workqueue	= nfsiod_workqueue,
		.flags		= RPC_TASK_ASYNC | swap_flags,
	};

	data->req	= req;
	data->inode	= inode;
	data->cred	= msg.rpc_cred;

	data->args.fh      = NFS_FH(inode);
	data->args.offset  = req_offset(req) + offset;
	data->args.pgbase  = req->wb_pgbase + offset;
	data->args.pages   = data->pagevec;
	data->args.count   = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof   = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes,rsize);

		data = nfs_readdata_alloc(1);
		if (data == NULL)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while(nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				  rsize, offset);
		if (ret == 0)
			ret = ret2;
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
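
/*
 * Gather every request on the list into a single READ call covering
 * "count" bytes and hand it to the RPC layer.
 */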
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	int ret = -ENOMEM;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
	nfs_async_read_error(head);
	return ret;
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}
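
/*
 * Handle a short read reply: if the server made some progress, advance
 * offset/pgbase/count past the data already received and restart the
 * RPC; in all other cases just free the NFSv4.1 sequence slot.
 */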
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		goto out;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		goto out;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
	return;
out:
	nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
				&data->res.seq_res);
}
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}
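
/*
 * Completion for one sub-request of a multi-part page read: mark the
 * page uptodate and release it once the last sub-request has finished.
 */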
static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
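
/*
 * rpc_call_prepare callback for NFSv4.1: set up the session sequence
 * (slot) before the READ call is transmitted; the task is only started
 * once a slot has been obtained.
 */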
#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
				&data->args.seq_args, &data->res.seq_res,
				0, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};
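
/*
 * Mark every page that was completely filled by the reply as uptodate.
 * A trailing, partially filled page is only marked uptodate if the
 * server reported eof or returned the full requested count.
 */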
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}
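
/*
 * Release every request attached to a completed full read and free the
 * read data structure.
 */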
static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
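
/*
 * read_cache_pages() filler: build an nfs_page request for each page on
 * the readahead list and feed it to the pageio descriptor.
 */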
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}
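
/*
 * address_space ->readpages(): try to satisfy the readahead list from
 * fscache first, then batch the remaining pages into as few READ calls
 * as the server's rsize allows.
 */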
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
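
/*
 * Create the slab cache and mempool used for nfs_read_data structures.
 */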
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}