// SPDX-License-Identifier: GPL-2.0-or-later
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second one */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
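
/*
 * Rough summary of the reference handling above (informal, not from the
 * original comments): once add_page_wait_queue() has hooked the monitor onto
 * the backing page, ownership of the monitor and its page refs passes to the
 * waiter/copier path, so the common exit path only releases whatever was not
 * handed over; the nomem/read_error/io_error legs must each still account for
 * the page with fscache_retrieval_complete().
 */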

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
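
/*
 * Illustration of the bmap() probe above (hypothetical numbers, assuming
 * 4KiB pages, PAGE_SHIFT == 12, and a backing filesystem with 1KiB blocks,
 * s_blocksize_bits == 10):
 *
 *	shift  = 12 - 10 = 2;
 *	block0 = page->index << shift;	 // netfs page 3 -> backing block 12
 *	block  = bmap(mapping, 12);	 // non-zero: a block is mapped, so the
 *					 //   page is submitted for reading;
 *					 // zero: nothing cached, so -ENODATA
 *					 //   or -ENOBUFS is returned instead
 */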

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}