/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <asm/uaccess.h>
static unsigned int blktrace_seq __read_mostly = 1;

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);
static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
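/*
 * Record the current wall clock time as a BLK_TN_TIMESTAMP note, so that
 * the trace's monotonic timestamps can be mapped back to real time.
 */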
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
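/*
 * Format a printf()-style message into this cpu's per-cpu message buffer
 * and emit it as a BLK_TN_MESSAGE note.
 */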
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
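/*
 * Filter check: returns nonzero if the event should be dropped according
 * to the configured action mask, sector range or pid filter.
 */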
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
					 BLK_TC_ACT(BLK_TC_WRITE) };
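/*
 * MASK_TC_BIT() moves a single BIO_RW_* flag of @rw into the corresponding
 * BLK_TC_* position of the action word; the shift distance is a
 * compile-time constant.
 */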
/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu;

	if (unlikely(bt->trace_state != Blktrace_running))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		cpu = smp_processor_id();
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->pid = pid;
		t->device = bt->dev;
		t->cpu = cpu;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
	}

	local_irq_restore(flags);
}
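/*
 * All traced devices share a single debugfs "block" directory, reference
 * counted via root_users and removed again when the last user goes away.
 */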
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;
static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}
static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}
static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;
	int created = 0;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
		created = 1;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else if (created)	/* Delete root only if we created it */
		blk_remove_root();

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}
static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
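/*
 * The debugfs "msg" file: a write injects a free-form message into the
 * trace stream via __trace_note_message().
 */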
static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count > BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';	/* user data need not be NUL-terminated */
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}
static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}
static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}
static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts->name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt,
					   &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1) {
		ret = blk_register_tracepoints();
		if (ret)
			goto probe_err;
	}
	mutex_unlock(&blk_probe_mutex);

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
probe_err:
	atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
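/*
 * Copy the user-supplied setup arguments in, run the common setup and,
 * on success, copy the (possibly mangled) name back out to userspace.
 */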
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}
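/*
 * blktrace probes
 */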
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
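/*
 * The getrq/sleeprq probes may fire without a bio; in that case log a
 * bare event that carries only the data direction.
 */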
static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}
static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}
static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
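/*
 * Tracepoint registration/unregistration. Registration failures are only
 * warned about; unregistration runs in reverse registration order.
 */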
static int blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);

	return 0;
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}