mbcache: use consistent type for entry count

From: Eric Biggers <ebiggers@google.com>

mbcache used several different types to represent the number of entries
in the cache.  For consistency within mbcache and with the shrinker API,
always use unsigned long.
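
For context (not part of this patch), the shrinker API that mbcache
implements already handles these counts as unsigned long; abridged from
include/linux/shrinker.h, with unrelated members omitted:

struct shrink_control {
	unsigned long nr_to_scan;	/* objects the VM asks the cache to scan */
	/* ... */
};

struct shrinker {
	unsigned long (*count_objects)(struct shrinker *shrink,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *shrink,
				       struct shrink_control *sc);
	/* ... */
};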

This does not change behavior for current mbcache users (ext2 and ext4)
since they limit the entry count to a value which easily fits in an int.
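
To illustrate the hazard that mixed count types invite in general, here
is a minimal standalone userspace sketch (not kernel code; the count
value below is hypothetical and, as noted above, never occurs with
ext2/ext4):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical count one past UINT_MAX; needs 64-bit unsigned long (LP64). */
	unsigned long nr_to_scan = (unsigned long)UINT_MAX + 1;

	/* Conversion to a narrower unsigned type reduces modulo 2^32, yielding 0. */
	unsigned int narrowed = (unsigned int)nr_to_scan;

	printf("unsigned long: %lu\n", nr_to_scan);
	printf("unsigned int:  %u\n", narrowed);
	return 0;
}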

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
---
 fs/mbcache.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index 07c5d7d..bf65906 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -29,7 +29,7 @@ struct mb_cache {
 	/* log2 of hash table size */
 	int			c_bucket_bits;
 	/* Maximum entries in cache to avoid degrading hash too much */
-	int			c_max_entries;
+	unsigned long		c_max_entries;
 	/* Protects c_list, c_entry_count */
 	spinlock_t		c_list_lock;
 	struct list_head	c_list;
@@ -43,7 +43,7 @@ struct mb_cache {
 static struct kmem_cache	*mb_entry_cache;
 
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
-				     unsigned int nr_to_scan);
+				     unsigned long nr_to_scan);
 
 static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
@@ -274,11 +274,11 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
 
 /* Shrink number of entries in cache */
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
-				     unsigned int nr_to_scan)
+				     unsigned long nr_to_scan)
 {
 	struct mb_cache_entry *entry;
 	struct hlist_bl_head *head;
-	unsigned int shrunk = 0;
+	unsigned long shrunk = 0;
 
 	spin_lock(&cache->c_list_lock);
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
@@ -316,10 +316,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static unsigned long mb_cache_scan(struct shrinker *shrink,
 				   struct shrink_control *sc)
 {
-	int nr_to_scan = sc->nr_to_scan;
 	struct mb_cache *cache = container_of(shrink, struct mb_cache,
 					      c_shrink);
-	return mb_cache_shrink(cache, nr_to_scan);
+	return mb_cache_shrink(cache, sc->nr_to_scan);
 }
 
 /* We shrink 1/X of the cache when we have too many entries in it */
@@ -341,8 +340,8 @@ static void mb_cache_shrink_worker(struct work_struct *work)
 struct mb_cache *mb_cache_create(int bucket_bits)
 {
 	struct mb_cache *cache;
-	int bucket_count = 1 << bucket_bits;
+	unsigned long bucket_count = 1UL << bucket_bits;
 
 	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
-- 
2.8.0.rc3.226.g39d4020