/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
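
/*
 * Illustrative sketch (not part of the original file): a netfs normally
 * reaches the two helpers above through the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers in <linux/fscache.h>, for instance
 * when invalidating a page.  example_invalidatepage() and example_cookie()
 * are invented names:
 *
 *	static void example_invalidatepage(struct page *page,
 *					   unsigned long offset)
 *	{
 *		struct fscache_cookie *cookie =
 *			example_cookie(page->mapping->host);
 *
 *		if (PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *	}
 */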
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the slow-work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
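
/*
 * Illustrative sketch (not part of the original file): a netfs ->releasepage()
 * would typically defer to this helper via the fscache_maybe_release_page()
 * wrapper, so that a page with a store pending is not released out from under
 * the cache.  example_cookie() is an invented name:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie =
 *			example_cookie(page->mapping->host);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;
 *		return 1;
 *	}
 */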
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_set_op_state(op, "CallFS");
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_set_op_state(op, "Done");
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, NULL);
	fscache_operation_init_slow(op, fscache_attr_changed_op);
	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
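
/*
 * Illustrative sketch (not part of the original file): a netfs would call the
 * fscache_attr_changed() wrapper after changing an inode's attributes (for
 * example from its ->setattr() path) so the cache can resize or discard its
 * backing store.  A negative return only means the cache could not be
 * updated; it need not fail the netfs operation.  example_setattr_done() is
 * an invented name:
 *
 *	static void example_setattr_done(struct fscache_cookie *cookie)
 *	{
 *		if (fscache_attr_changed(cookie) < 0)
 *			pr_debug("cache attribute update failed\n");
 *	}
 */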
/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
	struct fscache_retrieval *op =
		container_of(work, struct fscache_retrieval, op.fast_work);
	unsigned long start;

	_enter("{OP%x}", op->op.debug_id);

	start = jiffies;
	op->op.processor(&op->op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(&op->op);
}
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
}
/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	fscache_set_op_name(&op->op, "RetrRA1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
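
/*
 * Illustrative sketch (not part of the original file): a netfs ->readpage()
 * usually tries the cache first through the fscache_read_or_alloc_page()
 * wrapper and falls back to reading from the server on -ENODATA or -ENOBUFS.
 * example_cookie(), example_read_complete() and example_readpage_from_server()
 * are invented names:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct fscache_cookie *cookie =
 *			example_cookie(page->mapping->host);
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(cookie, page,
 *						 example_read_complete,
 *						 NULL, GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;
 *		return example_readpage_from_server(file, page);
 *	}
 */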
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * *nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrRAN");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
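
/*
 * Illustrative sketch (not part of the original file): the ->readpages() path
 * is analogous, using the fscache_read_or_alloc_pages() wrapper.  Pages for
 * which a cache read is dispatched are removed from the list and *nr_pages is
 * reduced, so whatever remains afterwards must be fetched from the server.
 * example_readpages_from_server() and example_read_complete() are invented
 * names:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  example_read_complete, NULL,
 *					  GFP_KERNEL);
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;
 *	return example_readpages_from_server(mapping, pages, nr_pages);
 */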
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrAL1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
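
/*
 * Illustrative sketch (not part of the original file): the fscache_alloc_page()
 * wrapper might be used when the netfs already holds the data and only needs
 * a block reserving before handing the page to fscache_write_page():
 *
 *	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *		fscache_write_page(cookie, page, GFP_KERNEL);
 */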
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	fscache_set_op_state(&op->op, "GetPage");

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_set_op_state(&op->op, "Store");
	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_set_op_state(&op->op, "EndWrite");
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_set_op_state(&op->op, "Abort");
		fscache_abort_object(object);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    write)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the slow work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
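
/*
 * Illustrative sketch (not part of the original file): once a page has been
 * read from the server (or freshly written) and marked with PG_fscache, a
 * netfs would normally hand it to the cache through the fscache_write_page()
 * wrapper, and simply uncache it if the write cannot be queued.
 * example_write_to_cache() is an invented name:
 *
 *	static void example_write_to_cache(struct fscache_cookie *cookie,
 *					   struct page *page)
 *	{
 *		if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 */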
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
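
/*
 * Illustrative sketch (not part of the original file): a netfs can supply a
 * mark_pages_cached() method in its cookie definition if it wants to be told
 * which pages the cache has marked.  The struct fields shown exist in
 * <linux/fscache.h>; the example names and values are invented:
 *
 *	static void example_mark_pages_cached(void *cookie_netfs_data,
 *					      struct address_space *mapping,
 *					      struct pagevec *cached_pvec)
 *	{
 *		(for example, account the pages against the inode)
 *	}
 *
 *	static const struct fscache_cookie_def example_data_cookie_def = {
 *		.name			= "example.data",
 *		.type			= FSCACHE_COOKIE_TYPE_DATAFILE,
 *		.mark_pages_cached	= example_mark_pages_cached,
 *	};
 */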