cachefiles: fix slab-use-after-free in cachefiles_withdraw_cookie()
fs/cachefiles/cache.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Manage high-level VFS aspects of a cache.
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include <trace/events/fscache.h>
#include "internal.h"

/*
 * Bring a cache online.
 */
int cachefiles_add_cache(struct cachefiles_cache *cache)
{
        struct fscache_cache *cache_cookie;
        struct path path;
        struct kstatfs stats;
        struct dentry *graveyard, *cachedir, *root;
        const struct cred *saved_cred;
        int ret;

        _enter("");

        cache_cookie = fscache_acquire_cache(cache->tag);
        if (IS_ERR(cache_cookie))
                return PTR_ERR(cache_cookie);
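
        /* From here on, failures must unwind through the error labels at the
         * bottom so that the cache cookie is relinquished and the security
         * override is dropped. */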
        /* we want to work under the module's security ID */
        ret = cachefiles_get_security_ID(cache);
        if (ret < 0)
                goto error_getsec;

        cachefiles_begin_secure(cache, &saved_cred);

        /* look up the directory at the root of the cache */
        ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
        if (ret < 0)
                goto error_open_root;

        cache->mnt = path.mnt;
        root = path.dentry;

        ret = -EINVAL;
        if (is_idmapped_mnt(path.mnt)) {
                pr_warn("File cache on idmapped mounts not supported");
                goto error_unsupported;
        }

        /* Check features of the backing filesystem:
         * - Directories must support looking up and directory creation
         * - We create tmpfiles to handle invalidation
         * - We use xattrs to store metadata
         * - We need to be able to query the amount of space available
         * - We want to be able to sync the filesystem when stopping the cache
         * - We use DIO to/from pages, so the blocksize mustn't be too big.
         */
        ret = -EOPNOTSUPP;
        if (d_is_negative(root) ||
            !d_backing_inode(root)->i_op->lookup ||
            !d_backing_inode(root)->i_op->mkdir ||
            !d_backing_inode(root)->i_op->tmpfile ||
            !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
            !root->d_sb->s_op->statfs ||
            !root->d_sb->s_op->sync_fs ||
            root->d_sb->s_blocksize > PAGE_SIZE)
                goto error_unsupported;

        ret = -EROFS;
        if (sb_rdonly(root->d_sb))
                goto error_unsupported;

        /* determine the security of the on-disk cache as this governs
         * security ID of files we create */
        ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
        if (ret < 0)
                goto error_unsupported;

        /* get the cache size and blocksize */
        ret = vfs_statfs(&path, &stats);
        if (ret < 0)
                goto error_unsupported;

        ret = -ERANGE;
        if (stats.f_bsize <= 0)
                goto error_unsupported;

        ret = -EOPNOTSUPP;
        if (stats.f_bsize > PAGE_SIZE)
                goto error_unsupported;

        cache->bsize = stats.f_bsize;
        cache->bshift = ilog2(stats.f_bsize);

        _debug("blksize %u (shift %u)",
               cache->bsize, cache->bshift);

        _debug("size %llu, avail %llu",
               (unsigned long long) stats.f_blocks,
               (unsigned long long) stats.f_bavail);

        /* set up caching limits */
        do_div(stats.f_files, 100);
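        /* stats.f_files now holds 1% of the backing filesystem's total file
         * count (do_div() divides in place), so each watermark below works
         * out to <x>_percent percent of capacity.  As enforced by
         * cachefiles_has_space(): caching stops when free space falls to the
         * stop level, culling begins at the cull level and only ceases again
         * once free space has recovered to the run level. */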
        cache->fstop = stats.f_files * cache->fstop_percent;
        cache->fcull = stats.f_files * cache->fcull_percent;
        cache->frun = stats.f_files * cache->frun_percent;

        _debug("limits {%llu,%llu,%llu} files",
               (unsigned long long) cache->frun,
               (unsigned long long) cache->fcull,
               (unsigned long long) cache->fstop);

        do_div(stats.f_blocks, 100);
        cache->bstop = stats.f_blocks * cache->bstop_percent;
        cache->bcull = stats.f_blocks * cache->bcull_percent;
        cache->brun = stats.f_blocks * cache->brun_percent;

        _debug("limits {%llu,%llu,%llu} blocks",
               (unsigned long long) cache->brun,
               (unsigned long long) cache->bcull,
               (unsigned long long) cache->bstop);

        /* get the cache directory and check its type */
        cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
        if (IS_ERR(cachedir)) {
                ret = PTR_ERR(cachedir);
                goto error_unsupported;
        }

        cache->store = cachedir;

        /* get the graveyard directory */
        graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
        if (IS_ERR(graveyard)) {
                ret = PTR_ERR(graveyard);
                goto error_unsupported;
        }

        cache->graveyard = graveyard;
        cache->cache = cache_cookie;

        ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
        if (ret < 0)
                goto error_add_cache;

        /* done */
        set_bit(CACHEFILES_READY, &cache->flags);
        dput(root);

        pr_info("File cache on %s registered\n", cache_cookie->name);

        /* check how much space the cache has */
        cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
        cachefiles_end_secure(cache, saved_cred);
        _leave(" = 0 [%px]", cache->cache);
        return 0;

error_add_cache:
        cachefiles_put_directory(cache->graveyard);
        cache->graveyard = NULL;
error_unsupported:
        cachefiles_put_directory(cache->store);
        cache->store = NULL;
        mntput(cache->mnt);
        cache->mnt = NULL;
        dput(root);
error_open_root:
        cachefiles_end_secure(cache, saved_cred);
        put_cred(cache->cache_cred);
        cache->cache_cred = NULL;
error_getsec:
        fscache_relinquish_cache(cache_cookie);
        cache->cache = NULL;
        pr_err("Failed to register: %d\n", ret);
        return ret;
}

/*
 * See if we have space for a number of pages and/or a number of files in the
 * cache
 */
int cachefiles_has_space(struct cachefiles_cache *cache,
                         unsigned fnr, unsigned bnr,
                         enum cachefiles_has_space_for reason)
{
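        /* Returns 0 if there is space (kicking culling off if free space has
         * fallen below the cull watermarks), -ENOBUFS if free space is at or
         * below the stop watermarks, or a negative error if statfs fails. */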
        struct kstatfs stats;
        u64 b_avail, b_writing;
        int ret;

        struct path path = {
                .mnt = cache->mnt,
                .dentry = cache->mnt->mnt_root,
        };

        //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
        //       (unsigned long long) cache->frun,
        //       (unsigned long long) cache->fcull,
        //       (unsigned long long) cache->fstop,
        //       (unsigned long long) cache->brun,
        //       (unsigned long long) cache->bcull,
        //       (unsigned long long) cache->bstop,
        //       fnr, bnr);

        /* find out how many pages of blockdev are available */
        memset(&stats, 0, sizeof(stats));

        ret = vfs_statfs(&path, &stats);
        if (ret < 0) {
                trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
                                           cachefiles_trace_statfs_error);
                if (ret == -EIO)
                        cachefiles_io_error(cache, "statfs failed");
                _leave(" = %d", ret);
                return ret;
        }
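
        /* Deduct blocks that cachefiles is already committed to writing out,
         * which statfs may not yet see as allocated (b_writing counts blocks
         * with writes in flight). */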
        b_avail = stats.f_bavail;
        b_writing = atomic_long_read(&cache->b_writing);
        if (b_avail > b_writing)
                b_avail -= b_writing;
        else
                b_avail = 0;

        //_debug("avail %llu,%llu",
        //       (unsigned long long)stats.f_ffree,
        //       (unsigned long long)b_avail);

        /* see if there is sufficient space */
        if (stats.f_ffree > fnr)
                stats.f_ffree -= fnr;
        else
                stats.f_ffree = 0;

        if (b_avail > bnr)
                b_avail -= bnr;
        else
                b_avail = 0;

        ret = -ENOBUFS;
        if (stats.f_ffree < cache->fstop ||
            b_avail < cache->bstop)
                goto stop_and_begin_cull;

        ret = 0;
        if (stats.f_ffree < cache->fcull ||
            b_avail < cache->bcull)
                goto begin_cull;
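
        /* If we had been culling and free space has recovered past the run
         * watermarks, clear the culling flag and let the daemon know the
         * state has changed. */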
        if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
            stats.f_ffree >= cache->frun &&
            b_avail >= cache->brun &&
            test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
            ) {
                _debug("cease culling");
                cachefiles_state_changed(cache);
        }

        //_leave(" = 0");
        return 0;

stop_and_begin_cull:
        switch (reason) {
        case cachefiles_has_space_for_write:
                fscache_count_no_write_space();
                break;
        case cachefiles_has_space_for_create:
                fscache_count_no_create_space();
                break;
        default:
                break;
        }
begin_cull:
        if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
                _debug("### CULL CACHE ###");
                cachefiles_state_changed(cache);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * Mark all the objects as being out of service and queue them all for cleanup.
 */
static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
{
        struct cachefiles_object *object;
        unsigned int count = 0;

        _enter("");

        spin_lock(&cache->object_list_lock);

        while (!list_empty(&cache->object_list)) {
                object = list_first_entry(&cache->object_list,
                                          struct cachefiles_object, cache_link);
                cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
                list_del_init(&object->cache_link);
                fscache_withdraw_cookie(object->cookie);
                count++;
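                /* The object list can be very long, so yield the lock and
                 * the CPU every 64 objects to let others get a look in. */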
                if ((count & 63) == 0) {
                        spin_unlock(&cache->object_list_lock);
                        cond_resched();
                        spin_lock(&cache->object_list_lock);
                }
        }

        spin_unlock(&cache->object_list_lock);
        _leave(" [%u objs]", count);
}

/*
 * Withdraw fscache volumes.
 */
static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
{
        struct list_head *cur;
        struct cachefiles_volume *volume;
        struct fscache_volume *vcookie;

        _enter("");
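
        /* fscache_withdraw_volume() can sleep, so it can't be called with
         * object_list_lock held.  Pin each volume that still has accesses
         * outstanding with fscache_try_get_volume() so that it can't be
         * freed once the lock is dropped, withdraw it, then rescan the list
         * from the top as it may have changed in the meantime. */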
retry:
        spin_lock(&cache->object_list_lock);
        list_for_each(cur, &cache->volumes) {
                volume = list_entry(cur, struct cachefiles_volume, cache_link);

                if (atomic_read(&volume->vcookie->n_accesses) == 0)
                        continue;

                vcookie = fscache_try_get_volume(volume->vcookie,
                                                 fscache_volume_get_withdraw);
                if (vcookie) {
                        spin_unlock(&cache->object_list_lock);
                        fscache_withdraw_volume(vcookie);
                        fscache_put_volume(vcookie, fscache_volume_put_withdraw);
                        goto retry;
                }
        }
        spin_unlock(&cache->object_list_lock);

        _leave("");
}

/*
 * Withdraw cachefiles volumes.
 */
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
        _enter("");
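
        /* Take each volume off the list in turn.  If fscache_try_get_volume()
         * fails, the volume cookie is already being freed by another thread;
         * busy-wait until that thread's cleanup removes the volume from the
         * list rather than touching it further ourselves. */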
        for (;;) {
                struct fscache_volume *vcookie = NULL;
                struct cachefiles_volume *volume = NULL;

                spin_lock(&cache->object_list_lock);
                if (!list_empty(&cache->volumes)) {
                        volume = list_first_entry(&cache->volumes,
                                                  struct cachefiles_volume, cache_link);
                        vcookie = fscache_try_get_volume(volume->vcookie,
                                                         fscache_volume_get_withdraw);
                        if (!vcookie) {
                                spin_unlock(&cache->object_list_lock);
                                cpu_relax();
                                continue;
                        }
                        list_del_init(&volume->cache_link);
                }
                spin_unlock(&cache->object_list_lock);
                if (!volume)
                        break;

                cachefiles_withdraw_volume(volume);
                fscache_put_volume(vcookie, fscache_volume_put_withdraw);
        }

        _leave("");
}

/*
 * Sync a cache to backing disk.
 */
static void cachefiles_sync_cache(struct cachefiles_cache *cache)
{
        const struct cred *saved_cred;
        int ret;

        _enter("%s", cache->cache->name);

        /* make sure all pages pinned by operations on behalf of the netfs are
         * written to disc */
        cachefiles_begin_secure(cache, &saved_cred);
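        /* sync_filesystem() must be called with s_umount held to guard
         * against the backing fs being remounted or unmounted under us. */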
        down_read(&cache->mnt->mnt_sb->s_umount);
        ret = sync_filesystem(cache->mnt->mnt_sb);
        up_read(&cache->mnt->mnt_sb->s_umount);
        cachefiles_end_secure(cache, saved_cred);

        if (ret == -EIO)
                cachefiles_io_error(cache,
                                    "Attempt to sync backing fs superblock returned error %d",
                                    ret);
}

/*
 * Withdraw cache objects.
 */
void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
{
        struct fscache_cache *fscache = cache->cache;

        pr_info("File cache on %s unregistering\n", fscache->name);

        fscache_withdraw_cache(fscache);
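        /* Propagate the withdrawal to the fscache volume cookies before
         * tearing down the objects so that volume-level accesses drain
         * first; this ordering is part of the use-after-free fix named in
         * the commit title above. */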
        cachefiles_withdraw_fscache_volumes(cache);

        /* we now have to destroy all the active objects pertaining to this
         * cache - which we do by passing them off to thread pool to be
         * disposed of */
        cachefiles_withdraw_objects(cache);
        fscache_wait_for_objects(fscache);

        cachefiles_withdraw_volumes(cache);
        cachefiles_sync_cache(cache);
        cache->cache = NULL;
        fscache_relinquish_cache(fscache);
}