/*
 * block/blktrace.c
 *
 * Copyright (C) 2006 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <asm/uaccess.h>

static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->device = bt->dev;
		t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
		t->pid = tsk->pid;
		t->cpu = smp_processor_id();
		t->pdu_len = sizeof(tsk->comm);
		memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
		tsk->btrace_seq = blktrace_seq;
	}
}

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
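
/*
 * The convention above: a non-zero return means "filter this event out".
 * For example, with act_mask = BLK_TC_READ and pid = 0, only events
 * carrying BLK_TC_ACT(BLK_TC_READ) whose sector lies within
 * [start_lba, end_lba] are logged, regardless of the issuing process.
 */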

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };

/*
 * Bio action bits of interest
 */
static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) };

/*
 * More could be added as needed, taking care to increment the decrementer
 * to get correct indexing
 */
#define trace_barrier_bit(rw)	\
	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)	\
	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
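
/*
 * How the mapping above works: trace_barrier_bit() shifts the barrier
 * flag down to yield 0 or 1, and trace_sync_bit() shifts the sync flag
 * down to yield 0 or 2, so each result indexes its own bio_act[] slot
 * (index 0 is the shared "flag not set" slot). A hypothetical extra
 * flag would have to land on index 4, with bio_act[] grown to 5 entries
 * and the "decrementer" bumped to 2, e.g.:
 *
 *	#define trace_foo_bit(rw)	\
 *		(((rw) & (1 << BIO_RW_FOO)) >> (BIO_RW_FOO - 2))
 *
 * (BIO_RW_FOO is illustrative only, not a real flag.)
 */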

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu;

	if (unlikely(bt->trace_state != Blktrace_running))
		return;

	what |= ddir_act[rw & WRITE];
	what |= bio_act[trace_barrier_bit(rw)];
	what |= bio_act[trace_sync_bit(rw)];

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes. Once reserved, it's
	 * enough to get preemption disabled to prevent read of this data
	 * before we are through filling it. get_cpu()/put_cpu() does this
	 * for us
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		cpu = smp_processor_id();
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->pid = pid;
		t->device = bt->dev;
		t->cpu = cpu;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(__blk_add_trace);
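
/*
 * Callers do not normally invoke __blk_add_trace() directly; the block
 * layer goes through the static inline wrappers in
 * <linux/blktrace_api.h> (blk_add_trace_bio(), blk_add_trace_rq(), ...),
 * which bail out early when no trace is attached. A minimal sketch of
 * that pattern (abbreviated, not the verbatim header code):
 *
 *	static inline void blk_add_trace_bio(struct request_queue *q,
 *					     struct bio *bio, u32 what)
 *	{
 *		struct blk_trace *bt = q->blk_trace;
 *
 *		if (likely(!bt))
 *			return;
 *
 *		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
 *				what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
 *	}
 */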

static struct dentry *blk_tree_root;
static struct mutex blk_tree_mutex;
static unsigned int root_users;

static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}

static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else
		blk_remove_root();

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}
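
/*
 * Lifetime of the debugfs tree, by example: the first tracer to call
 * blk_create_tree() creates the shared "block" directory plus its own
 * per-device directory below it (root_users becomes 1); each later
 * tracer only adds a per-device directory. blk_remove_tree() removes
 * the per-device directory and tears down "block" itself once
 * root_users drops back to zero.
 */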

static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	kfree(bt);
}

static int blk_trace_remove(request_queue_t *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->u.generic_ip;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
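
/*
 * The drop count is meant to be read from userspace via debugfs,
 * typically (assuming the conventional mount point):
 *
 *	# cat /sys/kernel/debug/block/<device>/dropped
 *
 * A non-zero value tells the tracing tool how many events were lost
 * to full subbuffers (see blk_subbuf_start_callback() below).
 */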

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
			   char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	char b[BDEVNAME_SIZE];
	int ret, i;

	if (copy_from_user(&buts, arg, sizeof(buts)))
		return -EFAULT;

	if (!buts.buf_size || !buts.buf_nr)
		return -EINVAL;

	strcpy(buts.name, bdevname(bdev, b));

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts.name); i++)
		if (buts.name[i] == '/')
			buts.name[i] = '_';

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts.name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = bdev->bd_dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
	if (!bt->rchan)
		goto err;
	bt->rchan->private_data = bt;

	bt->act_mask = buts.act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts.start_lba;
	bt->end_lba = buts.end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts.pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		if (bt->sequence)
			free_percpu(bt->sequence);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}

static int blk_trace_startstop(request_queue_t *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	request_queue_t *q;
	int ret, start = 0;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		ret = blk_trace_setup(q, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through - start and stop share one handler */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
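
/*
 * A minimal sketch of the userspace side of this interface, assuming
 * the BLKTRACE* ioctl numbers and struct blk_user_trace_setup from
 * <linux/blktrace_api.h> and an illustrative device path:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	// bytes per relay subbuffer
 *		.buf_nr   = 4,		// number of subbuffers
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// kernel fills in buts.name
 *	ioctl(fd, BLKTRACESTART);
 *	// ... consume the relay "trace" files under debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */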

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(request_queue_t *q)
{
	blk_trace_startstop(q, 0);
	blk_trace_remove(q);
}
456 * Average offset over two calls to sched_clock() with a gettimeofday()
457 * in the middle
459 static void blk_check_time(unsigned long long *t)
461 unsigned long long a, b;
462 struct timeval tv;
464 a = sched_clock();
465 do_gettimeofday(&tv);
466 b = sched_clock();
468 *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
469 *t -= (a + b) / 2;
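
/*
 * Numeric example of the offset above: if do_gettimeofday() reports
 * tv_sec = 1000, tv_usec = 500 (i.e. 1,000,000,500,000 ns) and the two
 * surrounding sched_clock() reads are a = 999,999,999,000 and
 * b = 1,000,000,001,000 (midpoint 1,000,000,000,000), then
 *
 *	*t = 1,000,000,500,000 - 1,000,000,000,000 = 500,000 ns
 *
 * i.e. *t holds the difference between wall-clock nanoseconds and
 * sched_clock() at the midpoint of the sample; __blk_add_trace()
 * subtracts this per-cpu value from each raw sched_clock() timestamp.
 */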

static void blk_trace_check_cpu_time(void *data)
{
	unsigned long long *t;
	int cpu = get_cpu();

	t = &per_cpu(blk_trace_cpu_offset, cpu);

	/*
	 * Just call it twice, hopefully the second call will be cache hot
	 * and a little more precise
	 */
	blk_check_time(t);
	blk_check_time(t);

	put_cpu();
}

/*
 * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
 * timings
 */
static void blk_trace_calibrate_offsets(void)
{
	unsigned long flags;

	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
	local_irq_save(flags);
	blk_trace_check_cpu_time(NULL);
	local_irq_restore(flags);
}

static void blk_trace_set_ht_offsets(void)
{
#if defined(CONFIG_SCHED_SMT)
	int cpu, i;

	/*
	 * now make sure HT siblings have the same time offset
	 */
	preempt_disable();
	for_each_online_cpu(cpu) {
		unsigned long long *cpu_off, *sibling_off;

		for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
			if (i == cpu)
				continue;

			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
			*sibling_off = *cpu_off;
		}
	}
	preempt_enable();
#endif
}

static __init int blk_trace_init(void)
{
	mutex_init(&blk_tree_mutex);
	blk_trace_calibrate_offsets();
	blk_trace_set_ht_offsets();

	return 0;
}

module_init(blk_trace_init);