blktrace: support discard requests
block/blktrace.c
blob 7495a84353e44dff8ca71d36f8ed8e2209764097
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

static unsigned int blktrace_seq __read_mostly = 1;
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                const int cpu = smp_processor_id();

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);
        }
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
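/*
 * Emit a wall-clock timestamp note, so user space can correlate the
 * monotonic trace timestamps with real time.
 */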
static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
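/*
 * Return 1 if this event should be filtered out: its action bits are masked
 * off, the sector falls outside the traced LBA range, or it belongs to a
 * different pid than the one being traced.
 */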
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}
/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };

/*
 * Bio action bits of interest
 */
static u32 bio_act[17] __read_mostly = {
        [1] = BLK_TC_ACT(BLK_TC_BARRIER),
        [2] = BLK_TC_ACT(BLK_TC_SYNC),
        [4] = BLK_TC_ACT(BLK_TC_AHEAD),
        [8] = BLK_TC_ACT(BLK_TC_META),
        [16] = BLK_TC_ACT(BLK_TC_DISCARD)
};

/*
 * More could be added as needed, taking care to increment the decrementer
 * to get correct indexing. See the worked example below the macros.
 */
#define trace_barrier_bit(rw)   \
        (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)      \
        (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw)     \
        (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
#define trace_meta_bit(rw)      \
        (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
#define trace_discard_bit(rw)   \
        (((rw) & (1 << BIO_RW_DISCARD)) >> (BIO_RW_DISCARD - 4))
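/*
 * Worked example of the table indexing above: each BIO_RW_* flag is shifted
 * so that, when set, it lands on its own bio_act[] slot:
 *
 *   trace_barrier_bit(rw) -> 1  -> BLK_TC_ACT(BLK_TC_BARRIER)
 *   trace_sync_bit(rw)    -> 2  -> BLK_TC_ACT(BLK_TC_SYNC)
 *   trace_ahead_bit(rw)   -> 4  -> BLK_TC_ACT(BLK_TC_AHEAD)
 *   trace_meta_bit(rw)    -> 8  -> BLK_TC_ACT(BLK_TC_META)
 *   trace_discard_bit(rw) -> 16 -> BLK_TC_ACT(BLK_TC_DISCARD)
 *
 * e.g. for a discard request, (rw & (1 << BIO_RW_DISCARD)) is shifted right
 * by (BIO_RW_DISCARD - 4), i.e. down to bit 4, so the lookup hits
 * bio_act[16] and the event picks up the discard action bit. A flag that is
 * not set indexes bio_act[0], which is zero and contributes nothing.
 */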
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct blk_io_trace *t;
        unsigned long flags;
        unsigned long *sequence;
        pid_t pid;
        int cpu;

        if (unlikely(bt->trace_state != Blktrace_running))
                return;

        what |= ddir_act[rw & WRITE];
        what |= bio_act[trace_barrier_bit(rw)];
        what |= bio_act[trace_sync_bit(rw)];
        what |= bio_act[trace_ahead_bit(rw)];
        what |= bio_act[trace_meta_bit(rw)];
        what |= bio_act[trace_discard_bit(rw)];

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                cpu = smp_processor_id();
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->pid = pid;
                t->device = bt->dev;
                t->cpu = cpu;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_add_trace);
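/*
 * The debugfs "block" root directory is shared by all traced devices;
 * root_users counts the per-device directories so the root is only
 * removed when the last one goes away.
 */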
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;
static inline void blk_remove_root(void)
{
        if (blk_tree_root) {
                debugfs_remove(blk_tree_root);
                blk_tree_root = NULL;
        }
}

static void blk_remove_tree(struct dentry *dir)
{
        mutex_lock(&blk_tree_mutex);
        debugfs_remove(dir);
        if (--root_users == 0)
                blk_remove_root();
        mutex_unlock(&blk_tree_mutex);
}
static struct dentry *blk_create_tree(const char *blk_name)
{
        struct dentry *dir = NULL;
        int created = 0;

        mutex_lock(&blk_tree_mutex);

        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root)
                        goto err;
                created = 1;
        }

        dir = debugfs_create_dir(blk_name, blk_tree_root);
        if (dir)
                root_users++;
        else {
                /* Delete root only if we created it */
                if (created)
                        blk_remove_root();
        }

err:
        mutex_unlock(&blk_tree_mutex);
        return dir;
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
        relay_close(bt->rchan);
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        blk_remove_tree(bt->dir);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
}
int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state == Blktrace_setup ||
            bt->trace_state == Blktrace_stopped)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
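/*
 * debugfs "dropped" file: reports how many events were lost because a
 * relay sub-buffer was full (see blk_subbuf_start_callback() below).
 */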
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};
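/*
 * debugfs "msg" file: lets user space inject free-form messages into the
 * trace stream via __trace_note_message().
 */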
static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                                size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count > BLK_TN_MAX_MSG)
                return -EINVAL;

        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';      /* user data is not NUL-terminated */
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_msg_open,
        .write =        blk_msg_write,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                        &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                        struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strcpy(buts->name, name);

        /*
         * some device names have larger paths - convert the slashes
         * to underscores for this to work as expected
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        ret = -ENOMEM;
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                goto err;

        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;
        dir = blk_create_tree(buts->name);
        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        return 0;
err:
        if (dir)
                blk_remove_tree(dir);
        if (bt) {
                if (bt->msg_file)
                        debugfs_remove(bt->msg_file);
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
                free_percpu(bt->sequence);
                free_percpu(bt->msg_data);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
        }
        return ret;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
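/*
 * Start or stop an existing trace: start is only valid from the setup or
 * stopped states, stop only from the running state. Starting bumps
 * blktrace_seq so each process gets a fresh BLK_TN_PROCESS note.
 */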
int blk_trace_startstop(struct request_queue *q, int start)
{
        struct blk_trace *bt;
        int ret;

        if ((bt = q->blk_trace) == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
                /* fall through to start the trace */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}
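
A minimal user-space sketch of driving these ioctls, assuming the struct
blk_user_trace_setup layout from <linux/blktrace_api.h>, the BLKTRACE*
ioctl numbers from <linux/fs.h>, and a hypothetical /dev/sda:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>            /* BLKTRACESETUP, BLKTRACESTART, ... */
#include <linux/blktrace_api.h>  /* struct blk_user_trace_setup */

int main(void)
{
        struct blk_user_trace_setup buts;
        int fd = open("/dev/sda", O_RDONLY);    /* hypothetical device */

        if (fd < 0)
                return 1;

        memset(&buts, 0, sizeof(buts));
        buts.buf_size = 512 * 1024;     /* size of each relay sub-buffer */
        buts.buf_nr = 4;                /* number of sub-buffers per CPU */
        /* act_mask/start_lba/end_lba/pid left 0: trace everything */

        if (ioctl(fd, BLKTRACESETUP, &buts) < 0)        /* do_blk_trace_setup() */
                return 1;
        ioctl(fd, BLKTRACESTART, 0);    /* blk_trace_startstop(q, 1) */

        sleep(1);       /* let some I/O events accumulate */

        ioctl(fd, BLKTRACESTOP, 0);     /* blk_trace_startstop(q, 0) */
        ioctl(fd, BLKTRACETEARDOWN, 0); /* blk_trace_remove() */
        close(fd);
        return 0;
}

The trace data itself is read from the per-CPU relay files ("trace0",
"trace1", ...) that relay_open() creates under the debugfs block/<name>/
directory set up above.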