/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

static unsigned int blktrace_seq __read_mostly = 1;

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

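/*
 * Usage note (a sketch of the wrapper declared in <linux/blktrace_api.h>,
 * not part of this file): drivers do not call __trace_note_message()
 * directly, they go through blk_add_trace_msg(), which first checks that a
 * trace is attached to the queue. Roughly:
 *
 *	#define blk_add_trace_msg(q, fmt, ...)
 *		do {
 *			struct blk_trace *bt = (q)->blk_trace;
 *			if (unlikely(bt))
 *				__trace_note_message(bt, fmt, ##__VA_ARGS__);
 *		} while (0)
 *
 * A caller can then drop a free-form note into the trace stream, e.g.
 * blk_add_trace_msg(q, "queue depth now %d", depth).
 */
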
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
					 BLK_TC_ACT(BLK_TC_WRITE) };

/*
 * Bio action bits of interest
 */
static u32 bio_act[9] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER),
					BLK_TC_ACT(BLK_TC_SYNC), 0,
					BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0,
					BLK_TC_ACT(BLK_TC_META) };

/*
 * More could be added as needed, taking care to adjust the shift constant
 * in the matching trace_*_bit() macro so the flag lands on the right
 * bio_act[] index
 */
#define trace_barrier_bit(rw)	\
	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)	\
	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw)	\
	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
#define trace_meta_bit(rw)	\
	(((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))

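/*
 * Worked example of the indexing above (assuming the BIO_RW_* bit layout
 * of this kernel generation, where BIO_RW_SYNC is bit 4): for a request
 * with the sync flag set, trace_sync_bit(rw) evaluates to
 * (1 << 4) >> (4 - 1) == 2, so the lookup hits bio_act[2], which holds
 * BLK_TC_ACT(BLK_TC_SYNC). Each macro shifts its flag to a distinct small
 * index, which is why bio_act[] has zero-filled holes at the unused slots.
 */
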
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu;

	if (unlikely(bt->trace_state != Blktrace_running))
		return;

	what |= ddir_act[rw & WRITE];
	what |= bio_act[trace_barrier_bit(rw)];
	what |= bio_act[trace_sync_bit(rw)];
	what |= bio_act[trace_ahead_bit(rw)];
	what |= bio_act[trace_meta_bit(rw)];

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes. Once reserved, it's
	 * enough to get preemption disabled to prevent read of this data
	 * before we are through filling it. get_cpu()/put_cpu() does this
	 * for us
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		cpu = smp_processor_id();
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->pid = pid;
		t->device = bt->dev;
		t->cpu = cpu;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_add_trace);

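/*
 * For context (a sketch of one of the callers, which live as static
 * inlines in <linux/blktrace_api.h>, not in this file): the bio variant
 * pulls the sector, size and rw flags out of the bio and records the
 * completion status via BIO_UPTODATE, roughly:
 *
 *	static inline void blk_add_trace_bio(struct request_queue *q,
 *					     struct bio *bio, u32 what)
 *	{
 *		struct blk_trace *bt = q->blk_trace;
 *
 *		if (likely(!bt))
 *			return;
 *
 *		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
 *				what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
 *	}
 */
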
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;

static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}

static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;
	int created = 0;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
		created = 1;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else {
		/* Delete root only if we created it */
		if (created)
			blk_remove_root();
	}

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

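/*
 * Usage note (illustrative; the exact path depends on where debugfs is
 * mounted and on the device name passed at setup time): the counter
 * exposed through blk_dropped_fops can be read from user space, e.g.
 *
 *	cat /sys/kernel/debug/block/sda/dropped
 *
 * A non-zero value means relay subbuffers filled up faster than they were
 * drained, so trace events were lost.
 */
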
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

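/*
 * Orientation note (a sketch, based on how relay names its buffer files):
 * relay_open() in do_blk_trace_setup() below registers these callbacks and
 * creates one buffer file per CPU in the per-device debugfs directory,
 * conventionally trace0, trace1, ... The blktrace user-space tool reads
 * those per-cpu streams, and blkparse later merges them using the sequence
 * numbers and timestamps written by __blk_add_trace().
 */
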
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strcpy(buts->name, name);

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts->name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

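/*
 * For reference, a summary of the state machine enforced above and in
 * blk_trace_remove():
 *
 *	Blktrace_setup   --start--> Blktrace_running
 *	Blktrace_stopped --start--> Blktrace_running
 *	Blktrace_running --stop-->  Blktrace_stopped
 *	Blktrace_setup / Blktrace_stopped --teardown--> freed
 */
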
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

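/*
 * Illustrative user-space sequence for the ioctls handled above (a sketch;
 * the device path and buffer sizes are arbitrary examples):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	// bytes per relay subbuffer
 *		.buf_nr   = 4,		// subbuffers per CPU
 *		.act_mask = 0,		// 0 is widened to "trace everything"
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);  // buts.name comes back filled in
 *	ioctl(fd, BLKTRACESTART);         // begin logging events
 *	...                               // read per-cpu files from debugfs
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);      // free the blk_trace structures
 */
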
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}