fs/cachefiles/rdwr.c

/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}
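
/* Note on the wiring (a summary of how the readers below use this waiter,
 * not new mechanism): the monitor's wait_queue_t is initialised with
 * init_waitqueue_func_entry() so cachefiles_read_waiter() becomes its wake
 * function, monitor->monitor.private carries the backing page, and
 * add_page_wait_queue() attaches it to that page's waitqueue:
 *
 *	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
 *	monitor->monitor.private = backpage;
 *	add_page_wait_queue(backpage, &monitor->monitor);
 *
 * unlock_page() on the backing page then wakes PG_locked waiters, which
 * invokes the function above under the waitqueue lock. */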

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}
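
/* A note on the trylock/unlock "jumpstart" above: if the backing page was
 * unlocked (read complete) before add_page_wait_queue() installed the
 * monitor, the wakeup has already happened and the waiter would never fire.
 * Taking and immediately dropping the page lock after installation forces
 * one more unlock event in that window, so the monitor is queued either
 * way.  (Our reading of the pattern; the same idiom recurs in the readers
 * below.) */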

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}
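
/* If the copier bails out early (budget of 8 exhausted or need_resched()),
 * any remaining monitors stay on op->to_do and fscache_enqueue_retrieval()
 * requeues the op, so the loop above resumes on a later thread-pool pass
 * rather than hogging this worker. */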

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	lru_cache_add_file(backpage);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
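
/* Reference ownership in the function above, as we read it: the refs taken
 * on monitor->netfs_page and backpage just before "monitor = NULL" travel
 * with the installed monitor and are dropped by cachefiles_read_copier();
 * the out: path only releases what is still held locally (an uninstalled
 * monitor and the local backpage ref). */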

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
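
	/* Illustrative example (assumed figures, not from this file): with
	 * 4KB pages (PAGE_SHIFT = 12) on a filesystem using 1KB blocks
	 * (s_blocksize_bits = 10), shift = 2, so page index 3 is probed at
	 * backing block 3 << 2 = 12. */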

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough: it doesn't indicate errors, but it's all we've got for
	 *   the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache(newpage, bmapping,
						netpage->index, cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need to add it
		 * to the LRU list and start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

		lru_cache_add_file(backpage);

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		lru_cache_add_file(netpage);

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		lru_cache_add_file(netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}
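
/* Allocation strategy in the function above: one monitor is allocated at a
 * time and reused across loop iterations, since it is only consumed
 * (monitor = NULL) when actually installed on a backing page; netfs pages
 * satisfied from an already-uptodate backing page therefore cost no monitor
 * allocation. */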

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto all_enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough: it doesn't indicate errors, but it's all
		 *   we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}
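
/* Return-value plumbing above: ret starts as -ENODATA (cache space
 * available) or -ENOBUFS (none), is forced to 0 only if every page was
 * moved to the backing-fs list, and is overridden only by -ENOMEM/-EINTR
 * from cachefiles_read_backing_file(), presumably because other failures
 * there are per-page and already accounted to the op. */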

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}
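
/* pagevec batching note for the allocators above: pagevec_add() returns the
 * slack remaining in the vector, so a return of 0 means it just became full
 * and must be flushed with fscache_mark_pages_cached(); the pagevec_count()
 * check after the loop flushes any partial final batch. */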

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
	} else {
		ret = -EIO;
		if (file->f_op->write) {
			pos = (loff_t) page->index << PAGE_SHIFT;

			/* we mustn't write more data than we have, so we have
			 * to beware of a partial page at EOF */
			eof = object->fscache.store_limit_l;
			len = PAGE_SIZE;
			if (eof & ~PAGE_MASK) {
				ASSERTCMP(pos, <, eof);
				if (eof - pos < PAGE_SIZE) {
					_debug("cut short %llx to %llx",
					       pos, eof);
					len = eof - pos;
					ASSERTCMP(pos + len, ==, eof);
				}
			}

			data = kmap(page);
			file_start_write(file);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			file_end_write(file);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
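
/* Worked example for the cut-short logic above, assuming 4KB pages: with
 * eof (store_limit_l) = 0x1a00 and page->index = 1, pos = 0x1000; eof has a
 * partial tail page (0x1a00 & ~PAGE_MASK != 0) and eof - pos = 0xa00 is
 * less than PAGE_SIZE, so len is trimmed to 0xa00 and the write stops
 * exactly at the store limit. */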

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}