/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>

#include <asm/system.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)
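
/*
 * Allocate a read data struct from the mempool.  Small requests use the
 * page pointer array embedded in the struct; larger ones fall back to a
 * separately allocated page vector.
 */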
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
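
/*
 * Zero any part of the requested range that the server did not return
 * because the read hit end of file, so that no uninitialised page data
 * is left behind when the pages are marked uptodate.
 */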
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
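
/*
 * Build a single nfs_page request for the page and start the read,
 * splitting it into several RPCs when the page is larger than the
 * server's rsize.
 */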
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}
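
/*
 * Release a completed read request: push the page to fscache if it is
 * now uptodate, unlock it, and drop the nfs_page.
 */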
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->path.dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req = req;
	data->inode = inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
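
/*
 * Error path: mark every page on the list as failed and release the
 * corresponding requests.
 */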
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				rsize, offset);
		if (ret == 0)
			ret = ret2;
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
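
/*
 * Coalesce the whole list of requests into a single read data struct and
 * send it to the server as one RPC call.
 */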
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	int ret = -ENOMEM;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
	nfs_async_read_error(head);
	return ret;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}
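
/*
 * Handle a short read: if the server returned fewer bytes than requested
 * and we are not at end of file, advance the arguments past the data we
 * already have and restart the RPC.
 */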
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		goto out;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		goto out;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
	return;
out:
	nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
				&data->res.seq_res);
	return;
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
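
/*
 * NFSv4.1: reserve a session slot and set up the sequence arguments
 * before the read call is transmitted.
 */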
#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
				&data->args.seq_args, &data->res.seq_res,
				0, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};
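
/*
 * Mark every page that was completely filled by this reply as uptodate,
 * and the final page as well if it hit end of file or the full count
 * was returned.
 */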
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
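
/*
 * Callback for read_cache_pages(): wrap each page in an nfs_page request
 * and add it to the pageio descriptor so requests can be batched.
 */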
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}
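
/*
 * Readahead entry point: try fscache first, then batch the remaining
 * pages into as few read RPCs as the server's rsize allows.
 */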
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}