/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

#define BLK_RQ_STAT_BATCH	64

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

static void blk_stat_init(struct blk_rq_stat *stat)
{
	/* min starts at all-ones so the first sample always wins min() */
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}
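
/*
 * Samples are first accumulated in stat->batch and only folded into the
 * running mean once BLK_RQ_STAT_BATCH samples are pending (or the batch
 * would overflow), so the div64_s64() division runs at most once per
 * batch rather than once per sample.
 */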
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		/* combine the two means, weighted by their sample counts */
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}

	dst->nr_samples += src->nr_samples;
}

static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	/* flush if adding @value would overflow the batch or fill it up */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
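
/*
 * Account a completed request's issue-to-completion time into every
 * active callback's per-CPU bucket; called from the request completion
 * path.
 */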
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	/* fold each CPU's buckets into the aggregate, then reset them */
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
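
/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function. Takes a request and returns which
 *	bucket to account it in, or a negative value to skip the request.
 * @buckets: Number of buckets.
 * @data: Value for the &struct blk_stat_callback data field.
 *
 * Return: &struct blk_stat_callback on success or NULL on an allocation
 * failure.
 */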
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
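
/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Resets the per-CPU buckets before publishing the callback on the queue's
 * RCU-protected callback list.
 */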
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);
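
/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback's timer is no longer running and the
 * callback will not be invoked again unless it is re-added.
 */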
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}
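
/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback, may be NULL.
 *
 * @cb must no longer be registered on a request queue. Freeing is deferred
 * past an RCU grace period so that concurrent list walkers in
 * blk_stat_add() remain safe.
 */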
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);
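
/*
 * Permanently enable stats accounting on @q: once set, QUEUE_FLAG_STATS
 * is kept even after the last callback is removed (see
 * blk_stat_remove_callback()).
 */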
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}
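
/*
 * Minimal usage sketch for the callback API above (hypothetical consumer,
 * not part of this file), assuming the blk_stat_activate_msecs() arming
 * helper from blk-stat.h; example_timer and example_bucket are made-up
 * names:
 *
 *	static int example_bucket(const struct request *rq)
 *	{
 *		return rq_data_dir(rq);
 *	}
 *
 *	static void example_timer(struct blk_stat_callback *cb)
 *	{
 *		...	cb->stat[0] and cb->stat[1] now hold this window's
 *			aggregated read and write latency stats
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer, example_bucket, 2, NULL);
 *	if (cb) {
 *		blk_stat_add_callback(q, cb);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 */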