/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request failed: since generic_file_read
 * only checks the return value of inode->i_op->readpage(), which is always 0
 * for async RPC, we set the error bit of the page to 1 when an error occurs,
 * and make nfs_readpage transmit requests synchronously when it encounters
 * this bit.  This is only a small problem, though, since we now retry all
 * operations within the RPC code when root squashing is suspected.
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

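/* Minimum number of read descriptors kept preallocated in the mempool. */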
#define MIN_POOL_READ	(32)

struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	nfs_readdata_free(data);
}

static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	unsigned long idx;

	if (i_size <= 0)
		return 0;
	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (page->index > idx)
		return 0;
	if (page->index != idx)
		return PAGE_CACHE_SIZE;
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}

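/*
 * A worked example, assuming PAGE_CACHE_SIZE == 4096: for i_size == 10000
 * the last valid page index is (10000 - 1) >> 12 == 2.  Pages 0 and 1
 * report a full 4096 bytes, page 2 reports 1 + (9999 & 4095) == 1808
 * valid bytes, and any page beyond index 2 reports 0.
 */
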
static
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

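/*
 * Zero out the remainder of a short read: everything from
 * args.pgbase + res.count up to args.count was requested but never
 * returned by the server, so when the server also reported EOF those
 * bytes must read back as zeroes rather than stale page contents.
 */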
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			memclear_highpage_flush(*pages, base, remainder);
			break;
		}
		memclear_highpage_flush(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

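/*
 * A worked example, assuming 4096-byte pages: with pgbase 0,
 * args.count == 12288 and res.count == 5000 at EOF, remainder is 7288.
 * The loop clears bytes 904..4095 of page 1 (pglen 3192) and then all
 * 4096 bytes of page 2, zeroing exactly the 7288 uninitialised bytes.
 */
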
/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	unsigned int	rsize = NFS_SERVER(inode)->rsize;
	unsigned int	count = PAGE_CACHE_SIZE;
	int		result;
	struct nfs_read_data *rdata;

	rdata = nfs_readdata_alloc(1);
	if (!rdata)
		return -ENOMEM;

	memset(rdata, 0, sizeof(*rdata));
	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rdata->cred = ctx->cred;
	rdata->inode = inode;
	INIT_LIST_HEAD(&rdata->pages);
	rdata->args.fh = NFS_FH(inode);
	rdata->args.context = ctx;
	rdata->args.pages = &page;
	rdata->args.pgbase = 0UL;
	rdata->args.count = rsize;
	rdata->res.fattr = &rdata->fattr;

	dprintk("NFS: nfs_readpage_sync(%p)\n", page);

	/*
	 * This works now because the socket layer never tries to DMA
	 * into this buffer directly.
	 */
	do {
		if (count < rsize)
			rdata->args.count = count;
		rdata->res.count = rdata->args.count;
		rdata->args.offset = page_offset(page) + rdata->args.pgbase;

		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
			NFS_SERVER(inode)->hostname,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			(unsigned long long)rdata->args.pgbase,
			rdata->args.count);

		lock_kernel();
		result = NFS_PROTO(inode)->read(rdata);
		unlock_kernel();

		/*
		 * Even if we had a partial success we can't mark the page
		 * cache valid.
		 */
		if (result < 0) {
			if (result == -EISDIR)
				result = -EINVAL;
			goto io_error;
		}
		count -= result;
		rdata->args.pgbase += result;
		nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);

		/* Note: result == 0 should only happen if we're caching
		 * a write that extends the file and punches a hole.
		 */
		if (rdata->res.eof != 0 || result == 0)
			break;
	} while (count);

	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);

	nfs_readpage_truncate_uninitialised_page(rdata);
	if (rdata->res.eof || rdata->res.count == rdata->args.count)
		SetPageUptodate(page);
	result = 0;

io_error:
	unlock_page(page);
	nfs_readdata_free(rdata);
	return result;
}

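/*
 * Read a page asynchronously: build a single nfs_page request covering
 * the valid part of the page, zero any tail beyond i_size, and hand the
 * request to nfs_pagein_one() for transmission.
 */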
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}

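/*
 * Unlock the page and drop our reference to the request once I/O on it
 * has completed, successfully or not.
 */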
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode;
	int flags;

	data->req	  = req;
	data->inode	  = inode = req->wb_context->dentry->d_inode;
	data->cred	  = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt	*clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > rsize) {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					rsize, offset);
			offset += rsize;
			nbytes -= rsize;
		} else {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					nbytes, offset);
			nbytes = 0;
		}
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

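/*
 * Send a whole list of page requests as a single RPC read call.  Falls
 * back to nfs_pagein_multi() when the server's rsize is too small to
 * cover even one page per request.
 */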
static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	unsigned int		count;

	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(head, inode);

	data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages);
	if (!data)
		goto out_bad;

	INIT_LIST_HEAD(&data->pages);
	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);

	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}

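/*
 * Coalesce contiguous requests from *head into batches of at most
 * rpages pages and feed each batch to nfs_pagein_one().  Returns the
 * number of pages sent, or a negative errno on failure.
 */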
static int
nfs_pagein_list(struct list_head *head, int rpages)
{
	LIST_HEAD(one_request);
	struct nfs_page		*req;
	int			error = 0;
	unsigned int		pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, rpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	nfs_async_read_error(head);
	return error;
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (likely(task->tk_status >= 0))
		nfs_readpage_truncate_uninitialised_page(data);
	else
		SetPageError(page);
	if (nfs_readpage_result(task, data) != 0)
		return;
	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readdata_release,
};

static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count != 0)
		SetPageUptodate(*pages);
}

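/*
 * Error-path counterpart of the above: flag every page touched by the
 * failed request, from args.pgbase through args.count bytes, with
 * PageError.
 */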
static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
{
	unsigned int count = data->args.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageError(*pages);
	if (count != 0)
		SetPageError(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	/*
	 * Note: nfs_readpage_result may change the values of
	 * data->args.  In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		nfs_readpage_set_pages_uptodate(data);
	} else
		nfs_readpage_set_pages_error(data);
	if (nfs_readpage_result(task, data) != 0)
		return;
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readdata_release,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
		task->tk_pid, task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);

	/* Is this a short read? */
	if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
		nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Yes, so retry the read at the end of the data */
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
			rpc_restart_call(task);
			return -EAGAIN;
		}
		task->tk_status = -EIO;
	}
	spin_lock(&data->inode->i_lock);
	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&data->inode->i_lock);
	return 0;
}

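/*
 * A worked example with hypothetical numbers: if a 16384-byte request
 * comes back with resp->count == 4096 and eof clear, we advance
 * argp->offset and argp->pgbase by 4096, shrink argp->count to 12288,
 * and restart the call so the remaining bytes are fetched from the new
 * offset.
 */
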
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page.  This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	if (file == NULL) {
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			return -EBADF;
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);
	if (!IS_SYNC(inode)) {
		error = nfs_readpage_async(ctx, inode, page);
		goto out;
	}

	error = nfs_readpage_sync(ctx, inode, page);
	if (error < 0 && IS_SWAPFILE(inode))
		printk("Aiee.. nfs swap-in of page failed!\n");
out:
	put_nfs_open_context(ctx);
	return error;

out_error:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct list_head *head;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_list_add_request(new, desc->head);
	return 0;
}

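/*
 * The ->readpages() address_space operation: queue an nfs_page request
 * for every page handed to us by the VM readahead code, then transmit
 * the whole batch through nfs_pagein_list().
 */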
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	LIST_HEAD(head);
	struct nfs_readdesc desc = {
		.head		= &head,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
				filp->private_data);
	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(&head, server->rpages);
		if (!ret) {
			nfs_add_stats(inode, NFSIOS_READPAGES, err);
			ret = err;
		}
	}
	put_nfs_open_context(desc.ctx);
	return ret;
}

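/*
 * Slab cache and mempool for struct nfs_read_data.  The mempool keeps
 * at least MIN_POOL_READ descriptors available, so reads can make
 * progress even under memory pressure.
 */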
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	if (kmem_cache_destroy(nfs_rdata_cachep))
		printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
}