/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};
static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
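/*
 * Note: these NULL-terminated string lists back the cache_replacement_policy
 * and errors attributes below; bch_snprint_string_list() and
 * bch_read_string_list() translate between the strings and the corresponding
 * superblock/cache_set fields.
 */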
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);
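/*
 * The write_attribute() macros declare write-only trigger files: writing any
 * value runs the corresponding action. Illustrative use from userspace,
 * assuming the usual bcache sysfs layout:
 *
 *	echo 1 > /sys/block/bcache0/bcache/detach
 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 */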
read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);
sysfs_time_stats_attribute(try_harder,	ms,  us);
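/*
 * Each sysfs_time_stats_attribute() invocation declares a group of read-only
 * files reporting latency statistics for one operation, with frequency and
 * duration in the units given (sec/ms/us); the matching
 * sysfs_print_time_stats() calls in the show functions below fill them in.
 */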
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
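/*
 * Writeback rate tuning knobs. When writeback_percent is nonzero the
 * writeback rate is driven by a proportional-derivative controller:
 * writeback_rate_p_term_inverse scales the proportional term,
 * writeback_rate_d_term the derivative term, and writeback_rate_d_smooth
 * how heavily the derivative is smoothed. writeback_rate_debug dumps the
 * controller state (see the show function below).
 */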
read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);
rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)
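/*
 * var() is shorthand used by the var_printf()/var_print()/var_hprint()
 * helpers below: the attribute name doubles as the name of the
 * corresponding cached_dev field.
 */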
	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));
	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);
	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];

		bch_hprint(dirty,
			   bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}
	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive,	"%u");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* Terminate at the end of the copied label, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;
#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
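/*
 * d_strtoul() parses a plain integer into the like-named dc-> field;
 * d_strtoi_h() additionally accepts human-readable size suffixes
 * (e.g. "4k", "1M") via sysfs_hatoi().
 */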
	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	/*
	 * Use the clamped parse only: a plain d_strtoul() here would return
	 * before the clamp below ever ran, and 0 must be rejected since the
	 * value is used as a divisor.
	 */
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);
	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);
	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);
	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}

		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}
	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}
	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);
	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];
	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);
	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* Terminate at the end of the copied label, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];
	sysfs_strtoul(data_csum,	d->data_csum);
	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}
	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}
	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}
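	/*
	 * root_usage() and the helpers below are GCC nested functions: they
	 * are local to this show function and take the cache_set explicitly,
	 * which keeps these sysfs-only statistics helpers out of the global
	 * namespace.
	 */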
	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}
	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}
	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}
	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}
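	/*
	 * btree_used() guards against division by zero with "?: 1" (gc may
	 * not have run yet), and average_key_size() likewise returns 0 until
	 * gc has counted some keys.
	 */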
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);
	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);
	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	average_key_size(c));
	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));
	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);
	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);
	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);
	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);
	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}
	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}
	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}
	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);
	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}
	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);
	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}
	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;
	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}
STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}
static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));
	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		btree = cached - p;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}
	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}
	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}
	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,