/* SPDX-License-Identifier: GPL-2.0 */
/* include/trace/events/bcache.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

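/*
 * Tracepoints for bcache. When built in, these events show up under
 * events/bcache/ in tracefs (typically /sys/kernel/debug/tracing) and can
 * be enabled one at a time or as a group through events/bcache/enable.
 * Each DECLARE_EVENT_CLASS() below defines a record layout and format
 * string once; DEFINE_EVENT() then stamps out named tracepoints that
 * share that class.
 */
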
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		/* data on the backing device starts at sector 16 by default */
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

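/*
 * Events on a single bcache key: inode:offset identify the key, size is
 * its length in sectors, and dirty marks data not yet written back to the
 * backing device.
 */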
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

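/* Events on a btree node, identified by the cache bucket holding it. */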
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

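/*
 * Generic bio event: device, starting sector, length in sectors, and the
 * rwbs flag string as used by blktrace.
 */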
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

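/*
 * Fires once per read of a bcache device, recording whether the lookup
 * hit the cache and whether the read bypassed the cache entirely.
 */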
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

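/*
 * Fires once per write, tagged with the cache set's UUID and recording
 * whether the write used writeback caching or bypassed the cache.
 */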
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

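/*
 * The journal logs inserted keys before the btree nodes that contain them
 * are written out; these events cover key replay at startup and the
 * journal (or a single journal entry) filling up.
 */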
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	/*
	 * block and keys are captured in the trace record even though only
	 * the bucket is printed.
	 */
	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

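/* Garbage collection merged several adjacent btree nodes into one. */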
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

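/*
 * Records a key inserted into a btree node: which node and level it
 * landed in, the key itself, and the op/status codes from the insert
 * path.
 */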
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

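/*
 * Reports a scan over the keyspace: how many keys were found between the
 * start and end inode:offset positions.
 */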
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

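/*
 * bcache_invalidate fires when a bucket's contents are discarded so the
 * bucket can be reused; bcache_alloc and bcache_alloc_fail track handing
 * out buckets from the per-reserve freelists.
 */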
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

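/*
 * Writeback copies dirty keys from the cache to the backing device; the
 * collision event fires when a key changed while it was being written
 * back.
 */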
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>