/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
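
/*
 * Illustrative sketch, not part of the original file: a netfs would normally
 * reach the function above through the fscache_wait_on_page_write() wrapper
 * in linux/fscache.h, e.g. from ->launder_page() before handing a page back
 * to the VM.  The my_inode/MY_I names below are hypothetical:
 *
 *      static int my_launder_page(struct page *page)
 *      {
 *              struct my_inode *mi = MY_I(page->mapping->host);
 *
 *              if (PageFsCache(page))
 *                      fscache_wait_on_page_write(mi->fscache_cookie, page);
 *              return 0;
 *      }
 */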
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        __fscache_wait_on_page_write(cookie, page);
        gfp &= ~__GFP_WAIT;
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
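
/*
 * Illustrative sketch, not part of the original file: the usual caller of the
 * function above is a netfs ->releasepage() aop, going through the
 * fscache_maybe_release_page() wrapper in linux/fscache.h.  The my_inode/MY_I
 * names are hypothetical:
 *
 *      static int my_release_page(struct page *page, gfp_t gfp)
 *      {
 *              struct my_inode *mi = MY_I(page->mapping->host);
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(mi->fscache_cookie, page, gfp))
 *                      return 0;
 *              return 1;
 *      }
 *
 * A false return from the wrapper means the cache is still writing the page
 * out, so the VM must not release it yet.
 */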
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op, true);
        _leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
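
/*
 * Illustrative sketch, not part of the original file: a netfs calls the
 * fscache_attr_changed() wrapper from linux/fscache.h when an inode's
 * attributes (typically its size) change, so the cache can resize the
 * backing object.  my_do_setattr() and the MY_I names are hypothetical:
 *
 *      static int my_setattr(struct dentry *dentry, struct iattr *attr)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *              int ret = my_do_setattr(inode, attr);
 *
 *              if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
 *                      fscache_attr_changed(MY_I(inode)->fscache_cookie);
 *              return ret;
 *      }
 */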
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}
/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}
/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        fscache_retrieval_complete(op, op->n_pages);
}
/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
                fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   -ENODATA   - no data available in the backing object for this block
 *   0          - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
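
/*
 * Illustrative sketch, not part of the original file: a netfs ->readpage()
 * aop would drive the function above via the fscache_read_or_alloc_page()
 * wrapper in linux/fscache.h, falling back to a server read on -ENODATA or
 * -ENOBUFS.  my_readpage_from_server() and the other my_* names are
 * hypothetical:
 *
 *      static void my_read_done(struct page *page, void *context, int error)
 *      {
 *              if (!error)
 *                      SetPageUptodate(page);
 *              unlock_page(page);
 *      }
 *
 *      static int my_readpage(struct file *file, struct page *page)
 *      {
 *              struct my_inode *mi = MY_I(page->mapping->host);
 *              int ret;
 *
 *              ret = fscache_read_or_alloc_page(mi->fscache_cookie, page,
 *                                               my_read_done, NULL,
 *                                               GFP_KERNEL);
 *              if (ret == 0)
 *                      return 0;
 *              return my_readpage_from_server(file, page);
 *      }
 *
 * On 0, the cache owns the read and my_read_done() runs at I/O completion;
 * any error means the netfs must fetch the page itself.
 */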
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM    - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS   - no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA   - no data available in the backing object for some or all of
 *                the pages
 *   0          - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
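
/*
 * Illustrative sketch, not part of the original file: the matching
 * ->readpages() pattern, via the fscache_read_or_alloc_pages() wrapper in
 * linux/fscache.h.  Pages the cache takes are unlinked from *pages and
 * deducted from *nr_pages; whatever remains goes to the server.  The my_*
 * names are hypothetical (my_read_done as sketched above):
 *
 *      static int my_readpages(struct file *file,
 *                              struct address_space *mapping,
 *                              struct list_head *pages, unsigned nr_pages)
 *      {
 *              struct my_inode *mi = MY_I(mapping->host);
 *              int ret;
 *
 *              ret = fscache_read_or_alloc_pages(mi->fscache_cookie, mapping,
 *                                                pages, &nr_pages,
 *                                                my_read_done, NULL,
 *                                                mapping_gfp_mask(mapping));
 *              if (ret == 0 && nr_pages == 0)
 *                      return 0;
 *              return my_readpages_from_server(file, mapping, pages, nr_pages);
 *      }
 */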
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   0          - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object)) {
                /* If we get here, then the on-disk cache object likely no
                 * longer exists, so we should just cancel this write
                 * operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [inactive]");
                return;
        }

        if (!cookie) {
                /* If we get here, then the cookie belonging to the object was
                 * detached, probably by the cookie being withdrawn due to
                 * memory pressure, which means that the pages we were going
                 * to write to the cache no longer exist - therefore, we can
                 * just cancel this write operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
                       _op->flags, _op->state, object->state, object->flags);
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op, true);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, true);
        _leave("");
}
/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        while (spin_lock(&cookie->stores_lock),
               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                              ARRAY_SIZE(results),
                                              FSCACHE_COOKIE_PENDING_TAG),
               n > 0) {
                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        spin_unlock(&cookie->stores_lock);
        _leave("");
}
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
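
/*
 * Illustrative sketch, not part of the original file: once a page has been
 * read from the server and marked PG_fscache (see fscache_mark_page_cached()
 * below), the netfs can push it into the cache with the fscache_write_page()
 * wrapper from linux/fscache.h, uncaching it on failure.  The my_* names are
 * hypothetical:
 *
 *      static void my_server_read_done(struct my_inode *mi,
 *                                      struct page *page, int error)
 *      {
 *              if (!error && PageFsCache(page) &&
 *                  fscache_write_page(mi->fscache_cookie, page,
 *                                     GFP_KERNEL) != 0)
 *                      fscache_uncache_page(mi->fscache_cookie, page);
 *              unlock_page(page);
 *      }
 */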
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        printk(KERN_WARNING "FS-Cache:"
                               " Cookie type %s marked page %lx"
                               " multiple times\n",
                               cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);