/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
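
/*
 * Print the names of the bits that are set in @flags, separated by '|'.
 * Bits without an entry in @flag_name (or beyond @flag_name_count) are
 * printed as their numeric position instead.
 */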
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(SYNCFULL),
        QUEUE_FLAG_NAME(ASYNCFULL),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(STACKABLE),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(NO_SG_MERGE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(FLUSH_NQ),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
};
#undef QUEUE_FLAG_NAME
static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}
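
/*
 * Writing "run" to the "state" attribute kicks blk_mq_run_hw_queues();
 * writing "start" restarts stopped hardware queues via
 * blk_mq_start_stopped_hw_queues(). With debugfs mounted in the usual place
 * this looks roughly like the following (the device name is only an example):
 *
 *   echo run > /sys/kernel/debug/block/nvme0n1/state
 */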
static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has
         * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
         * to avoid triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use either 'run' or 'start'\n", __func__);
                return -EINVAL;
        }
        return count;
}
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}
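
/*
 * q->poll_stat[] interleaves read and write buckets: even slots hold read
 * statistics and odd slots hold write statistics. Bucket N is labelled with
 * an I/O size of (1 << (9 + N)) bytes, i.e. the buckets start at 512 bytes
 * and double from there.
 */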
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
        HCTX_STATE_NAME(TAG_WAITING),
        HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME
static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(SG_MERGE),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(ZONE_REPORT),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(QUEUED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME
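
/*
 * Dump one request in the form
 *
 *   <addr> {.op=<name>, .cmd_flags=..., .rq_flags=..., .tag=N, .internal_tag=N}
 *
 * If the driver implements the optional ->show_rq() blk_mq_ops callback it is
 * invoked here so driver-private state can be appended. A minimal sketch of
 * such a callback (the names are purely illustrative, not from this file):
 *
 *   static void mydrv_show_rq(struct seq_file *m, struct request *rq)
 *   {
 *           struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *           seq_printf(m, ", .retries=%d", cmd->retries);
 *   }
 */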
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
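
/*
 * seq_file iterator for the hctx "dispatch" attribute. The hardware queue's
 * dispatch list is walked under hctx->lock, which is taken in ->start() and
 * released in ->stop(), so the list cannot change while it is being printed.
 */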
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next = hctx_dispatch_next,
        .stop = hctx_dispatch_stop,
        .show = blk_mq_debugfs_rq_show,
};
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}
static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}
static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}
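
/*
 * "dispatched" prints hctx->dispatched[] as a histogram: the first row is the
 * count for 0-request dispatch runs, row i covers runs of roughly 2^(i-1)
 * requests, and the last row (suffixed with '+') collects everything larger.
 * As with the other _write() handlers in this file, writing to the attribute
 * resets the counters.
 */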
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}
static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}
static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}
static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next = ctx_rq_list_next,
        .stop = ctx_rq_list_stop,
        .show = blk_mq_debugfs_rq_show,
};
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}
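
/*
 * Generic glue between debugfs and the blk_mq_debugfs_attr tables. Every
 * attribute file is created with blk_mq_debugfs_fops and a pointer to its
 * attr entry as inode->i_private; the object the attribute operates on
 * (request_queue, hctx or ctx) is recovered from the parent directory's
 * inode->i_private, which debugfs_create_files() sets up below. Attributes
 * with a ->show() callback go through single_open(); attributes that provide
 * ->seq_ops use a full seq_file iterator instead.
 */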
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        if (!attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);
        else
                return seq_release(inode, file);
}
const struct file_operations blk_mq_debugfs_fops = {
        .open = blk_mq_debugfs_open,
        .read = seq_read,
        .write = blk_mq_debugfs_write,
        .llseek = seq_lseek,
        .release = blk_mq_debugfs_release,
};
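
/*
 * Each entry below is {name, mode, show, write} or, for list-style files,
 * {name, mode, .seq_ops = ...}. Adding another read-only queue attribute
 * would be a one-line change along these lines (queue_foo_show is a
 * hypothetical callback, not something defined in this file):
 *
 *   {"foo", 0400, queue_foo_show},
 */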
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, queue_poll_stat_show},
        {"state", 0600, queue_state_show, queue_state_write},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};
static bool debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        d_inode(parent)->i_private = data;

        for (; attr->name; attr++) {
                if (!debugfs_create_file(attr->name, attr->mode, parent,
                                         (void *)attr, &blk_mq_debugfs_fops))
                        return false;
        }
        return true;
}
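
/*
 * Top-level registration. With debugfs mounted at its usual location the
 * resulting hierarchy looks like:
 *
 *   /sys/kernel/debug/block/<disk>/                  queue attributes ("state", ...)
 *   /sys/kernel/debug/block/<disk>/hctx<N>/          per-hardware-queue attributes
 *   /sys/kernel/debug/block/<disk>/hctx<N>/cpu<M>/   per-software-queue (ctx) attributes
 *   .../sched/                                       I/O scheduler attributes, if any
 */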
int blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        if (!q->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->debugfs_dir, q,
                                  blk_mq_debugfs_queue_attrs))
                goto err;

        /*
         * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
                if (q->elevator && !hctx->sched_debugfs_dir &&
                    blk_mq_debugfs_register_sched_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                       struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}
int blk_mq_debugfs_register_hctx(struct request_queue *q,
                                 struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
        if (!hctx->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->debugfs_dir, hctx,
                                  blk_mq_debugfs_hctx_attrs))
                goto err;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(hctx, ctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_hctx(hctx);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        return -ENOMEM;
        }

        return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}
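
/*
 * I/O schedulers can expose their own debugfs attributes by pointing
 * elevator_type::queue_debugfs_attrs and ::hctx_debugfs_attrs at tables of
 * the same form as the ones above; they end up under the "sched" directories
 * created here. A rough sketch of how a scheduler might wire this up
 * (deadline_queue_debugfs_attrs is illustrative, not defined in this file):
 *
 *   static struct elevator_type mq_deadline = {
 *           ...
 *           .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
 *           ...
 *   };
 */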
int blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        if (!q->debugfs_dir)
                return -ENOENT;

        if (!e->queue_debugfs_attrs)
                return 0;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
        if (!q->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->sched_debugfs_dir, q,
                                  e->queue_debugfs_attrs))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister_sched(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                       struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!hctx->debugfs_dir)
                return -ENOENT;

        if (!e->hctx_debugfs_attrs)
                return 0;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        if (!hctx->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                                  e->hctx_debugfs_attrs))
                return -ENOMEM;

        return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}