/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c,
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request fails: since generic_file_read()
 * only checks the return value of inode->i_op->readpage(), which is always 0
 * for async RPC, we set the page's error bit when an error occurs and make
 * nfs_readpage() transmit requests synchronously when it finds that bit set.
 * This is only a small problem, though, since we now retry all operations
 * within the RPC code when root squashing is suspected.
 */

#include <linux/config.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static void nfs_readpage_result_partial(struct nfs_read_data *, int);
static void nfs_readpage_result_full(struct nfs_read_data *, int);

static kmem_cache_t *nfs_rdata_cachep;
mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)
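
/*
 * Release callback for read RPC tasks: hand the nfs_read_data buffer
 * back to the slab cache/mempool it was allocated from.
 */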
void nfs_readdata_release(struct rpc_task *task)
{
	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;

	nfs_readdata_free(data);
}
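
/*
 * Return how many bytes of @page lie below the cached i_size, i.e. how
 * much of the page is actually backed by file data.  Returns zero for
 * pages wholly beyond end-of-file.
 */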
static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	unsigned long idx;

	if (i_size <= 0)
		return 0;
	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (page->index > idx)
		return 0;
	if (page->index != idx)
		return PAGE_CACHE_SIZE;
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}
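
/*
 * Zero a page that lies entirely beyond end-of-file and mark it up to
 * date; no read needs to go on the wire for it.
 */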
static
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	unsigned int	rsize = NFS_SERVER(inode)->rsize;
	unsigned int	count = PAGE_CACHE_SIZE;
	int		result;
	struct nfs_read_data *rdata;

	rdata = nfs_readdata_alloc();
	if (!rdata)
		return -ENOMEM;

	memset(rdata, 0, sizeof(*rdata));
	rdata->flags = (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
	rdata->cred = ctx->cred;
	rdata->inode = inode;
	INIT_LIST_HEAD(&rdata->pages);
	rdata->args.fh = NFS_FH(inode);
	rdata->args.context = ctx;
	rdata->args.pages = &page;
	rdata->args.pgbase = 0UL;
	rdata->args.count = rsize;
	rdata->res.fattr = &rdata->fattr;

	dprintk("NFS: nfs_readpage_sync(%p)\n", page);

	/*
	 * This works now because the socket layer never tries to DMA
	 * into this buffer directly.
	 */
	do {
		if (count < rsize)
			rdata->args.count = count;
		rdata->res.count = rdata->args.count;
		rdata->args.offset = page_offset(page) + rdata->args.pgbase;

		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
			NFS_SERVER(inode)->hostname,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			(unsigned long long)rdata->args.pgbase,
			rdata->args.count);

		lock_kernel();
		result = NFS_PROTO(inode)->read(rdata);
		unlock_kernel();

		/*
		 * Even if we had a partial success we can't mark the page
		 * cache valid.
		 */
		if (result < 0) {
			if (result == -EISDIR)
				result = -EINVAL;
			goto io_error;
		}
		count -= result;
		rdata->args.pgbase += result;
		/* Note: result == 0 should only happen if we're caching
		 * a write that extends the file and punches a hole.
		 */
		if (rdata->res.eof != 0 || result == 0)
			break;
	} while (count);

	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);

	if (count)
		memclear_highpage_flush(page, rdata->args.pgbase, count);
	SetPageUptodate(page);
	if (PageError(page))
		ClearPageError(page);
	result = 0;

io_error:
	unlock_page(page);
	nfs_readdata_free(rdata);
	return result;
}
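
/*
 * Queue a single async read request for the valid portion of the page
 * and hand it to nfs_pagein_one().
 */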
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}
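
/*
 * Unlock the page and drop the nfs_page request once its read has
 * completed, successfully or not.
 */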
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct inode *inode;

	data->req	  = req;
	data->inode	  = inode = req->wb_context->dentry->d_inode;
	data->cred	  = req->wb_context->cred;

	data->args.fh	  = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr	  = &data->fattr;
	data->res.count	  = count;
	data->res.eof	  = 0;
	nfs_fattr_init(&data->fattr);

	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_readdata_release;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_readdata_alloc();
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;
		data->complete = nfs_readpage_result_partial;

		if (nbytes > rsize) {
			nfs_read_rpcsetup(req, data, rsize, offset);
			offset += rsize;
			nbytes -= rsize;
		} else {
			nfs_read_rpcsetup(req, data, nbytes, offset);
			nbytes = 0;
		}
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
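
/*
 * Read a list of coalesced requests with a single RPC if the server's
 * rsize permits; otherwise fall back to nfs_pagein_multi() and split
 * the page across several read calls.
 */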
static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	unsigned int		count;

	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(head, inode);

	data = nfs_readdata_alloc();
	if (!data)
		goto out_bad;

	INIT_LIST_HEAD(&data->pages);
	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->complete = nfs_readpage_result_full;
	nfs_read_rpcsetup(req, data, count, 0);

	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}
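
/*
 * Walk the request list, coalescing contiguous requests (at most
 * @rpages per batch) and issuing one read call per batch.
 */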
static int
nfs_pagein_list(struct list_head *head, int rpages)
{
	LIST_HEAD(one_request);
	struct nfs_page		*req;
	int			error = 0;
	unsigned int		pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, rpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	nfs_async_read_error(head);
	return error;
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct nfs_read_data *data, int status)
{
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (status >= 0) {
		unsigned int request = data->args.count;
		unsigned int result = data->res.count;

		if (result < request) {
			memclear_highpage_flush(page,
					data->args.pgbase + result,
					request - result);
		}
	} else
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
{
	unsigned int count = data->res.count;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);
		struct page *page = req->wb_page;

		nfs_list_remove_request(req);

		if (status >= 0) {
			if (count < PAGE_CACHE_SIZE) {
				if (count < req->wb_bytes)
					memclear_highpage_flush(page,
							req->wb_pgbase + count,
							req->wb_bytes - count);
				count = 0;
			} else
				count -= PAGE_CACHE_SIZE;
			SetPageUptodate(page);
		} else
			SetPageError(page);
		nfs_readpage_release(req);
	}
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
void nfs_readpage_result(struct rpc_task *task)
{
	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;
	int status = task->tk_status;

	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
		task->tk_pid, status);

	/* Is this a short read? */
	if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Yes, so retry the read at the end of the data */
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
			rpc_restart_call(task);
			return;
		}
		task->tk_status = -EIO;
	}
	spin_lock(&data->inode->i_lock);
	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&data->inode->i_lock);
	data->complete(data, status);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	if (file == NULL) {
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			return -EBADF;
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);
	if (!IS_SYNC(inode)) {
		error = nfs_readpage_async(ctx, inode, page);
		goto out;
	}

	error = nfs_readpage_sync(ctx, inode, page);
	if (error < 0 && IS_SWAPFILE(inode))
		printk("Aiee.. nfs swap-in of page failed!\n");
out:
	put_nfs_open_context(ctx);
	return error;

out_error:
	unlock_page(page);
	return error;
}
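
/*
 * Context passed to readpage_async_filler() via read_cache_pages():
 * the list on which to queue requests and the open context to read
 * under.
 */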
struct nfs_readdesc {
	struct list_head *head;
	struct nfs_open_context *ctx;
};
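
/*
 * Filler callback for read_cache_pages(): create an nfs_page request
 * for @page and queue it on desc->head for later coalescing.
 */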
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_list_add_request(new, desc->head);
	return 0;
}
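
/*
 * ->readpages() address_space operation: queue each page via the
 * filler above, then send the accumulated requests in batches bounded
 * by the server's per-call page limit.
 */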
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	LIST_HEAD(head);
	struct nfs_readdesc desc = {
		.head		= &head,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
				filp->private_data);
	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(&head, server->rpages);
		if (!ret)
			ret = err;
	}
	put_nfs_open_context(desc.ctx);
	return ret;
}
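
/*
 * Set up the slab cache and mempool from which nfs_read_data
 * structures are allocated.
 */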
int nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create(MIN_POOL_READ,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}
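
/*
 * Tear down the read-data mempool and slab cache, warning if any
 * structures are still in use.
 */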
void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	if (kmem_cache_destroy(nfs_rdata_cachep))
		printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
}