/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>

#include "trace_output.h"
static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default-disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);
static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
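
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants an inline annotation in the trace stream would normally go
 * through the blk_add_trace_msg() wrapper from <linux/blktrace_api.h>
 * rather than call this export directly, roughly:
 *
 *	blk_add_trace_msg(q, "queue depth now %d", depth);
 *
 * The formatted string is capped at BLK_TN_MAX_MSG bytes by the
 * vscnprintf() above and shows up as a BLK_TN_MESSAGE event.
 */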
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };
/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
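
/*
 * Illustrative sketch (not part of the original file): for a request
 * flag BIO_RW_x with matching category BLK_TC_x, MASK_TC_BIT() computes
 *
 *	(rw & (1 << BIO_RW_x)) << (ilog2(BLK_TC_x) + BLK_TC_SHIFT - BIO_RW_x)
 *
 * which is either 0 (flag clear) or 1 << (ilog2(BLK_TC_x) + BLK_TC_SHIFT),
 * i.e. exactly BLK_TC_ACT(BLK_TC_x).  The ORs in __blk_add_trace() below
 * thus translate request flags to category bits without conditionals.
 */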
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}
int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}
static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}
static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
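
/*
 * Illustrative sketch (not part of the original file): once a trace is
 * set up, user space can inject a message event through the "msg"
 * debugfs file created in do_blk_trace_setup() below, e.g.
 *
 *	# echo "fio job 1 starting" > /sys/kernel/debug/block/sda/msg
 *
 * (assuming debugfs is mounted at /sys/kernel/debug and the trace was
 * set up on sda).  blk_msg_write() copies the string and feeds it to
 * __trace_note_message() as a BLK_TN_MESSAGE event.
 */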
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * This will fail for all but the last file, but that is ok: what we
	 * care about is the top level buts->name directory going away when
	 * the last trace file is gone. Then we don't have to rmdir() that
	 * manually on trace stop, so it nicely solves the issue with
	 * force-killing of running traces.
	 */

	debugfs_remove(parent);
	return 0;
}
static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}
static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
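
/*
 * Illustrative sketch (not part of the original file): after a
 * successful do_blk_trace_setup() the debugfs tree for a device looks
 * roughly like
 *
 *	/sys/kernel/debug/block/<name>/
 *		dropped			(blk_dropped_fops, read-only counter)
 *		msg			(blk_msg_fops, write-only message sink)
 *		trace0 ... traceN-1	(one relay file per cpu)
 *
 * where <name> is buts->name with slashes converted to underscores.
 */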
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
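
/*
 * Illustrative sketch (not part of the original file): a minimal
 * user-space caller would drive the ioctls handled above roughly
 * like so:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);   creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);          state -> Blktrace_running
 *	... run the workload, read the relay "trace" files ...
 *	ioctl(fd, BLKTRACESTOP);           state -> Blktrace_stopped
 *	ioctl(fd, BLKTRACETEARDOWN);       blk_trace_remove()
 *
 * Fields left zero in buts (act_mask, end_lba) get defaults filled in
 * by do_blk_trace_setup().
 */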
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				rq->cmd_len, rq->cmd);
	} else  {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}
static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}
static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     A device-mapper or raid target sometimes needs to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}
static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%s]\n",
					t_sector(ent), t_sec(ent), cmd);
	return trace_seq_printf(s, "[%s]\n", cmd);
}
static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
					t_sec(ent), t_error(ent));
	return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}
static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}
static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}
static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}
static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}
static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;

	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}
/*
 * struct tracer operations
 */
static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}
static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
	trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}
static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}
static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
	trace_flags |= TRACE_ITER_CONTEXT_INFO;
}
static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}
static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q", "queue" },	     blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M", "backmerge" },     blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F", "frontmerge" },    blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G", "getrq" },	     blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S", "sleeprq" },	     blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R", "requeue" },	     blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D", "issue" },	     blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C", "complete" },	     blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P", "plug" },	     blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" },     blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I", "insert" },	     blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X", "split" },	     blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" },	     blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A", "remap" },	     blk_log_remap },
};
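
/*
 * Illustrative sketch (not part of the original file, sample values
 * invented): in classic mode, blk_log_action_classic() plus the print
 * handler from the table above produce blkparse-style lines such as
 *
 *	  8,0    1     0.000102340  4813  Q   R 3050 + 8 [dd]
 *
 * i.e. major,minor / cpu / sec.nsec / pid / act / rwbs, followed by
 * blk_log_generic()'s "sector + nr_sectors [comm]".
 */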
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Bad pc action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	if (!trace_print_context(iter))
		return TRACE_TYPE_PARTIAL_LINE;

	return print_one_line(iter, false);
}
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}
static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}
static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
};
static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};
static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}
/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = dev;
	bt->act_mask = (u16)-1;
	bt->end_lba = -1ULL;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)
static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);
static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
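
/*
 * Illustrative sketch (not part of the original file): assuming this
 * group is attached as the "trace" sysfs directory of each block
 * device, the attributes above allow tracing to be driven from the
 * shell without the ioctl interface, e.g.
 *
 *	# echo read,write > /sys/block/sda/sda1/trace/act_mask
 *	# echo 1          > /sys/block/sda/sda1/trace/enable
 *	... workload ...
 *	# echo 0          > /sys/block/sda/sda1/trace/enable
 *
 * Writes land in sysfs_blk_trace_attr_store() below; "enable" toggles
 * between blk_trace_setup_queue() and blk_trace_remove_queue().
 */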
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
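
/*
 * Illustrative sketch (not part of the original file):
 * blk_trace_str2mask() and blk_trace_mask2str() below are inverses
 * over the mask_maps[] table, e.g.
 *
 *	blk_trace_str2mask("read,write,barrier")
 *		== BLK_TC_READ | BLK_TC_WRITE | BLK_TC_BARRIER
 *
 * while an unknown token makes the whole parse fail with -EINVAL
 * instead of being silently skipped.
 */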
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev->bd_dev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}