/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
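
/*
 * Illustrative sketch (not part of the original source): a netfs that is
 * about to invalidate a page must first let the cache finish writing it out,
 * typically from its ->invalidatepage() handler.  my_inode_cookie() is a
 * hypothetical helper standing in for however the netfs locates the fscache
 * cookie for an inode:
 *
 *      static void my_invalidatepage(struct page *page, unsigned long offset)
 *      {
 *              struct fscache_cookie *cookie =
 *                      my_inode_cookie(page->mapping->host);
 *
 *              if (PageFsCache(page)) {
 *                      fscache_wait_on_page_write(cookie, page);
 *                      fscache_uncache_page(cookie, page);
 *              }
 *      }
 */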
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
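
/*
 * Illustrative sketch (not part of the original source): the VM calls a
 * netfs's ->releasepage() when it wants to reclaim a page, and the netfs
 * should give FS-Cache the chance to cancel or veto a pending store first.
 * my_inode_cookie() is again a hypothetical helper:
 *
 *      static int my_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct fscache_cookie *cookie =
 *                      my_inode_cookie(page->mapping->host);
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(cookie, page, gfp))
 *                      return 0;       // cache is still using the page
 *              return 1;               // page may be released
 *      }
 */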
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        _leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
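
/*
 * Illustrative sketch (not part of the original source): a netfs should call
 * fscache_attr_changed() when an object's attributes - most importantly its
 * size - change on the server, e.g. from its ->setattr() handler after a
 * truncation, so the cache can adjust or discard data beyond the new EOF.
 * my_inode_cookie() and my_do_setattr() are hypothetical helpers:
 *
 *      static int my_setattr(struct dentry *dentry, struct iattr *attr)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *              int ret = my_do_setattr(inode, attr);
 *
 *              if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
 *                      fscache_attr_changed(my_inode_cookie(inode));
 *              return ret;
 *      }
 */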
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}
/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}
/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   -ENODATA   - no data available in the backing object for this block
 *   0          - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
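
/*
 * Illustrative sketch (not part of the original source): a netfs ->readpage()
 * handler tries the cache first and only goes to the server on -ENODATA or
 * -ENOBUFS.  my_inode_cookie() and my_read_from_server() are hypothetical;
 * my_io_complete() is the fscache_rw_complete_t callback invoked when the
 * cache finishes the read:
 *
 *      static void my_io_complete(struct page *page, void *context, int error)
 *      {
 *              if (!error)
 *                      SetPageUptodate(page);
 *              unlock_page(page);
 *      }
 *
 *      static int my_readpage(struct file *file, struct page *page)
 *      {
 *              struct fscache_cookie *cookie =
 *                      my_inode_cookie(page->mapping->host);
 *              int ret;
 *
 *              ret = fscache_read_or_alloc_page(cookie, page,
 *                                               my_io_complete, NULL,
 *                                               GFP_KERNEL);
 *              switch (ret) {
 *              case 0:                 // read dispatched to the cache
 *                      return 0;
 *              case -ENODATA:          // block allocated, but no data yet
 *              case -ENOBUFS:          // cache unavailable
 *                      return my_read_from_server(file, page);
 *              default:
 *                      return ret;
 *              }
 *      }
 */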
/*
 * read a list of pages from the cache or allocate a block in which to store
 * them
 * - we return:
 *   -ENOMEM    - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS   - no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA   - no data available in the backing object for some or all of
 *                the pages
 *   0          - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * *nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
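
/*
 * Illustrative sketch (not part of the original source): the multi-page
 * variant slots into a netfs ->readpages() handler the same way.  Pages the
 * cache takes on are removed from the list and *nr_pages is decremented, so
 * whatever remains must be fetched from the server
 * (my_read_pages_from_server() is hypothetical):
 *
 *      ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *                                        my_io_complete, NULL, GFP_KERNEL);
 *      if (ret == 0 && nr_pages == 0)
 *              return 0;               // cache took every page
 *      return my_read_pages_from_server(mapping, pages, nr_pages);
 */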
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   0          - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0)
                fscache_abort_object(object);
        else
                fscache_enqueue_operation(&op->op);

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        _leave("");
}
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
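
/*
 * Illustrative sketch (not part of the original source): once a netfs has a
 * page of data (e.g. when a read from the server completes), it can push the
 * page to the cache; on failure the page must be uncached again, otherwise
 * the cookie cannot later be relinquished.  This follows the pattern
 * described in Documentation/filesystems/caching/netfs-api.txt:
 *
 *      static void my_cache_page(struct fscache_cookie *cookie,
 *                                struct page *page)
 *      {
 *              if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *                      fscache_uncache_page(cookie, page);
 *      }
 */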
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);