/*
 * V9FS cache definitions.
 *
 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN	11

struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

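/*
 * Note: this netfs definition must be registered with FS-Cache (via
 * fscache_register_netfs()) during 9p initialisation before any cookies
 * are acquired; v9fs_cache_netfs.primary_index is only valid once that
 * registration has succeeded.  The registration call is not shown here.
 */
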
/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used as a reasonably random cache tag.
 */

static int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	/* Ten decimal digits plus the terminating NUL fit in CACHETAG_LEN. */
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= v9fs_cache_session_get_key,
};

void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag)
		v9fs_random_cachetag(v9ses);

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}

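/*
 * How the session tag is exercised in practice (a sketch, not part of this
 * file): a mount such as
 *
 *	mount -t 9p -o trans=tcp,cache=fscache,cachetag=mytag server /mnt
 *
 * makes the session acquire a cookie keyed by "mytag"; without an explicit
 * cachetag= option, v9fs_random_cachetag() above supplies a jiffies-based
 * tag instead.  Option parsing happens in the v9fs mount code, not here.
 */
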
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}

static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	*size = i_size_read(&v9inode->vfs_inode);
	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}

static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}

static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
	struct v9fs_inode *v9inode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	/* Walk the whole mapping and clear PG_fscache on every cached page. */
	for (;;) {
		nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= v9fs_cache_inode_get_key,
	.get_attr	= v9fs_cache_inode_get_attr,
	.get_aux	= v9fs_cache_inode_get_aux,
	.check_aux	= v9fs_cache_inode_check_aux,
	.now_uncached	= v9fs_cache_inode_now_uncached,
};

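/*
 * Coherency model implied by the definition above: the 9P qid path acts as
 * the cache key for a file's data object, while the qid version is stored
 * as auxiliary data.  If the server bumps the version (i.e. the file has
 * changed), v9fs_cache_inode_check_aux() reports the cached object as
 * obsolete and FS-Cache discards it.
 */
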
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 0);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	/* Retiring the cookie (second argument == 1) discards the cached data. */
	fscache_relinquish_cookie(v9inode->fscache, 1);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_fid *fid;

	if (!v9inode->fscache)
		return;

	/* Only read-only opens keep the cache; any writable open flushes it. */
	spin_lock(&v9inode->fscache_lock);
	fid = filp->private_data;
	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	spin_unlock(&v9inode->fscache_lock);
}

void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	spin_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, 1);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	spin_unlock(&v9inode->fscache_lock);
}

int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

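/*
 * Typical caller pattern (a simplified sketch; the real readpage path lives
 * outside this file and uses the non-underscore wrappers from cache.h):
 *
 *	ret = v9fs_readpage_from_fscache(inode, page);
 *	if (ret == 0)
 *		return ret;		(the cache will complete the page)
 *	... otherwise read the page over 9P, then ...
 *	v9fs_readpage_to_fscache(inode, page);
 */
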
/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}