fs/gfs2/trace_gfs2.h [linux-2.6.git]
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include "incore.h"
#include "glock.h"

#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x, \
			    dlm_state_name(IV), \
			    dlm_state_name(NL), \
			    dlm_state_name(CR), \
			    dlm_state_name(CW), \
			    dlm_state_name(PR), \
			    dlm_state_name(PW), \
			    dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x, \
			    { GFS2_BLKST_FREE, "free" }, \
			    { GFS2_BLKST_USED, "used" }, \
			    { GFS2_BLKST_DINODE, "dinode" }, \
			    { GFS2_BLKST_UNLINKED, "unlinked" })

#define show_glock_flags(flags) __print_flags(flags, "", \
	{(1UL << GLF_LOCK), "l" }, \
	{(1UL << GLF_DEMOTE), "D" }, \
	{(1UL << GLF_PENDING_DEMOTE), "d" }, \
	{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
	{(1UL << GLF_DIRTY), "y" }, \
	{(1UL << GLF_LFLUSH), "f" }, \
	{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
	{(1UL << GLF_REPLY_PENDING), "r" }, \
	{(1UL << GLF_INITIAL), "I" }, \
	{(1UL << GLF_FROZEN), "F" }, \
	{(1UL << GLF_QUEUED), "q" }, \
	{(1UL << GLF_LRU), "L" }, \
	{(1UL << GLF_OBJECT), "o" })

#ifndef NUMPTY
#define NUMPTY
static inline u8 glock_trace_state(unsigned int state)
{
	switch(state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif
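
/*
 * Note (illustrative, not part of the original header): glock_trace_state()
 * exists so the tracepoints below can print GFS2 lock states using the DLM
 * mode names from glock_trace_name(), e.g.
 *	glock_trace_state(LM_ST_SHARED)   == DLM_LOCK_PR, printed as "PR"
 *	glock_trace_state(LM_ST_UNLOCKED) == DLM_LOCK_NL, printed as "NL"
 */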

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */

/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,

	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),

	TP_ARGS(gl, new_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, new_state )
		__field( u8, dmt_state )
		__field( u8, tgt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->new_state = glock_trace_state(new_state);
		__entry->tgt_state = glock_trace_state(gl->gl_target);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);
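
/*
 * Usage sketch (call site not part of this header): TRACE_EVENT() above
 * generates trace_gfs2_glock_state_change(), which the glock code can call
 * when a DLM lock request completes, roughly:
 *	trace_gfs2_glock_state_change(gl, new_state);
 * A resulting trace line might look like (values illustrative):
 *	253,2 glock 2:8637 state EX to NL tgt:NL dmt:NL flags:lD
 */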

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))
);
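
/*
 * Usage sketch: trace_gfs2_glock_put(gl) fires as a glock is torn down;
 * the destination state in the output is always printed as "IV"
 * (DLM_LOCK_IV above) since the lock ceases to exist.
 */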

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, dmt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);
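
/*
 * Usage sketch: trace_gfs2_demote_rq(gl) records the state the demote
 * callback is asking for (dmt_state) alongside the current state, so a
 * remote node asking an EX holder to drop to NL would log "demote EX to NL".
 */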

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,

	TP_PROTO(const struct gfs2_holder *gh, int first),

	TP_ARGS(gh, first),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, first )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->first = first;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu promote %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->first ? "first": "other",
		  glock_trace_name(__entry->state))
);
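
/*
 * Usage sketch: trace_gfs2_promote(gh, first) is reported per holder when a
 * glock is granted; "first" distinguishes the first holder granted from
 * subsequent ones, so the line ends in e.g. "promote first EX" or
 * "promote other PR" (shared grants show as "PR" via glock_trace_name()).
 */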

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,

	TP_PROTO(const struct gfs2_holder *gh, int queue),

	TP_ARGS(gh, queue),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, queue )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->queue = queue;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);
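
/*
 * Usage sketch: trace_gfs2_glock_queue(gh, 1) logs "queue" and
 * trace_gfs2_glock_queue(gh, 0) logs "dequeue" (TP_printk prepends "de"
 * when queue == 0), marking when a holder enters or leaves the glock queue.
 */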

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,

	TP_PROTO(const struct gfs2_bufdata *bd, int pin),

	TP_ARGS(bd, pin),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, pin )
		__field( u32, len )
		__field( sector_t, block )
		__field( u64, ino )
	),

	TP_fast_assign(
		__entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
		__entry->pin = pin;
		__entry->len = bd->bd_bh->b_size;
		__entry->block = bd->bd_bh->b_blocknr;
		__entry->ino = bd->bd_gl->gl_name.ln_number;
	),

	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);
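
/*
 * Usage sketch: trace_gfs2_pin(bd, 1) when a buffer is pinned into the
 * journal and trace_gfs2_pin(bd, 0) when it is unpinned; the block number,
 * length and owning inode (glock number) come from the bufdata, so pin and
 * unpin events for a block can be paired when checking I/O ordering.
 */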

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, int start),

	TP_ARGS(sdp, start),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( u64, log_seq )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->log_seq = sdp->sd_log_sequence;
	),

	TP_printk("%u,%u log flush %s %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq)
);
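
/*
 * Usage sketch: a log flush is expected to be bracketed by
 * trace_gfs2_log_flush(sdp, 1) on entry and trace_gfs2_log_flush(sdp, 0)
 * on completion; the interval between the "start" and "end" lines for the
 * same log sequence number gives the flush latency from the Section 2
 * objectives.
 */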

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,

	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),

	TP_ARGS(sdp, blocks),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, blocks )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->blocks = blocks;
	),

	TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks)
);
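
/*
 * Usage sketch: "blocks" is a plain signed int, which lets reservations and
 * releases share this one event with opposite signs, e.g.
 * trace_gfs2_log_blocks(sdp, blks) vs. trace_gfs2_log_blocks(sdp, -blks);
 * the running sum then tracks the log usage stats from the Section 2
 * objectives.
 */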

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),

	TP_ARGS(sdp, wbc, start),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( int, sync_mode )
		__field( long, nr_to_write )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->sync_mode = wbc->sync_mode;
		__entry->nr_to_write = wbc->nr_to_write;
	),

	TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->start ? "start" : "end",
		  __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
		  __entry->nr_to_write)
);
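
/*
 * Usage sketch: like the log flush event, AIL writeback can be bracketed by
 * trace_gfs2_ail_flush(sdp, wbc, 1) and trace_gfs2_ail_flush(sdp, wbc, 0);
 * sync_mode and nr_to_write record how much work the writeback_control
 * asked the flush to do.
 */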

/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */

/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,

	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
		 sector_t lblock, int create, int errno),

	TP_ARGS(ip, bh, lblock, create, errno),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, lblock )
		__field( sector_t, pblock )
		__field( u64, inum )
		__field( unsigned long, state )
		__field( u32, len )
		__field( int, create )
		__field( int, errno )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->lblock = lblock;
		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
		__entry->inum = ip->i_no_addr;
		__entry->state = bh->b_state;
		__entry->len = bh->b_size;
		__entry->create = create;
		__entry->errno = errno;
	),

	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->lblock,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->pblock,
		  __entry->state, __entry->create ? "create " : "nocreate",
		  __entry->errno)
);
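
/*
 * Usage sketch (assumed call pattern, not defined in this header): a block
 * mapping request can be bracketed with
 *	trace_gfs2_bmap(ip, bh, lblock, create, 1);      before the lookup
 *	trace_gfs2_bmap(ip, bh, lblock, create, errno);  after it completes
 * so the pair of lines gives both the bmap latency and the physical extent
 * (pblock/len) the logical block was mapped to.
 */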

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,

	TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
		 u8 block_state),

	TP_ARGS(ip, block, len, block_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, start )
		__field( u64, inum )
		__field( u32, len )
		__field( u8, block_state )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->start = block;
		__entry->inum = ip->i_no_addr;
		__entry->len = len;
		__entry->block_state = block_state;
	),

	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state))
);
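
/*
 * Usage sketch: the same event covers both allocation and freeing, with the
 * new state of the extent passed in, e.g.
 *	trace_gfs2_block_alloc(ip, block, len, GFS2_BLKST_USED);   allocation
 *	trace_gfs2_block_alloc(ip, block, len, GFS2_BLKST_FREE);   free
 * block_state_name() then renders the state as "used", "free", "dinode" or
 * "unlinked" in the trace output.
 */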

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>