tracing: Buffer the output of seq_file in case of filled buffer
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / trace / trace_output.c
blobe5cf90fef34e928546b962a2afa5927e0f5964d4
1 /*
2 * trace_output.c
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6 */
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
12 #include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
17 DECLARE_RWSEM(trace_event_mutex);
19 DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
20 EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
22 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
24 static int next_event_type = __TRACE_LAST_TYPE + 1;
/**
 * trace_print_seq - copy a trace_seq's buffered output into a seq_file
 * @m: the seq_file to write to
 * @s: the trace_seq holding the buffered trace text
 *
 * Returns the seq_write() result: 0 on success, non-zero when the
 * seq_file buffer could not take the data.
 */
int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	/* Never copy more than one page minus the terminating byte. */
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.  On failure the data stays queued so the
	 * next read can retry it.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}
43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
45 struct trace_seq *s = &iter->seq;
46 struct trace_entry *entry = iter->ent;
47 struct bprint_entry *field;
48 int ret;
50 trace_assign_type(field, entry);
52 ret = trace_seq_bprintf(s, field->fmt, field->buf);
53 if (!ret)
54 return TRACE_TYPE_PARTIAL_LINE;
56 return TRACE_TYPE_HANDLED;
59 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
61 struct trace_seq *s = &iter->seq;
62 struct trace_entry *entry = iter->ent;
63 struct print_entry *field;
64 int ret;
66 trace_assign_type(field, entry);
68 ret = trace_seq_printf(s, "%s", field->buf);
69 if (!ret)
70 return TRACE_TYPE_PARTIAL_LINE;
72 return TRACE_TYPE_HANDLED;
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * It returns 0 if the trace oversizes the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	/* One byte of the page is always reserved for the '\0'. */
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	/*
	 * NOTE(review): unlike trace_seq_printf() this returns the free
	 * space that was available, not 1 — callers only test non-zero.
	 */
	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);
/*
 * Expand a binary-printk record (@fmt plus the packed argument words in
 * @binary) into textual form at the end of the sequence buffer.
 * Returns 0 when the result would not fit, non-zero otherwise.
 */
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
165 * trace_seq_puts - trace sequence printing of simple string
166 * @s: trace sequence descriptor
167 * @str: simple string to record
169 * The tracer may use either the sequence operations or its own
170 * copy to user routines. This function records a simple string
171 * into a special buffer (@s) for later retrieval by a sequencer
172 * or other mechanism.
174 int trace_seq_puts(struct trace_seq *s, const char *str)
176 int len = strlen(str);
178 if (len > ((PAGE_SIZE - 1) - s->len))
179 return 0;
181 memcpy(s->buffer + s->len, str, len);
182 s->len += len;
184 return len;
187 int trace_seq_putc(struct trace_seq *s, unsigned char c)
189 if (s->len >= (PAGE_SIZE - 1))
190 return 0;
192 s->buffer[s->len++] = c;
194 return 1;
197 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
199 if (len > ((PAGE_SIZE - 1) - s->len))
200 return 0;
202 memcpy(s->buffer + s->len, mem, len);
203 s->len += len;
205 return len;
208 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
210 unsigned char hex[HEX_CHARS];
211 const unsigned char *data = mem;
212 int i, j;
214 #ifdef __BIG_ENDIAN
215 for (i = 0, j = 0; i < len; i++) {
216 #else
217 for (i = len-1, j = 0; i >= 0; i--) {
218 #endif
219 hex[j++] = hex_asc_hi(data[i]);
220 hex[j++] = hex_asc_lo(data[i]);
222 hex[j++] = ' ';
224 return trace_seq_putmem(s, hex, j);
227 void *trace_seq_reserve(struct trace_seq *s, size_t len)
229 void *ret;
231 if (len > ((PAGE_SIZE - 1) - s->len))
232 return NULL;
234 ret = s->buffer + s->len;
235 s->len += len;
237 return ret;
/*
 * Append the textual pathname of @path to the sequence buffer, with
 * newlines mangled by mangle_path() so the output stays one line.
 * Returns 1 on success ('?' is emitted when d_path() fails), 0 when
 * the buffer is full or mangling fails.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path() builds the path at the END of the buffer... */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		/* ...and mangle_path() copies it down to s->len. */
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
/*
 * Render @flags as a @delim-separated list of names from @flag_array,
 * appending "0x..." for any bits with no matching name.  The rendered
 * string is NUL-terminated inside the seq buffer and a pointer to its
 * start is returned for use with "%s".
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	/* Remember where this rendering starts; that is the return value. */
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		/* All bits of the mask must be set for the name to apply. */
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	/* NUL-terminate so the caller can print the result with "%s". */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
297 const char *
298 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
299 const struct trace_print_flags *symbol_array)
301 int i;
302 const char *ret = p->buffer + p->len;
304 for (i = 0; symbol_array[i].name; i++) {
306 if (val != symbol_array[i].mask)
307 continue;
309 trace_seq_puts(p, symbol_array[i].name);
310 break;
313 if (!p->len)
314 trace_seq_printf(p, "0x%lx", val);
316 trace_seq_putc(p, 0);
318 return ret;
320 EXPORT_SYMBOL(ftrace_print_symbols_seq);
#ifdef CONFIG_KRETPROBES
/*
 * A kretprobed function's return address is patched to point at the
 * kretprobe trampoline, so a symbol lookup yields the meaningless
 * trampoline name.  Substitute an explicit marker in that case.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/* Print the symbol name for @address through @fmt (no offset). */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	/* Without kallsyms there is nothing to print; report success. */
	return 1;
}
/* Print "symbol+offset/size" for @address through @fmt. */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	/* Without kallsyms there is nothing to print; report success. */
	return 1;
}
/* Fixed-width hex format for an instruction pointer. */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

/*
 * Print a userspace address, resolved against @mm's VMAs when
 * available, as "path-of-mapping[+offset]" and/or the raw address.
 * Returns 0 when the seq buffer filled up, non-zero otherwise.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	/* Print the raw address if asked for, or if no mapping was found. */
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
/*
 * Print one " => addr\n" line per entry of a userspace stack trace,
 * resolving addresses against the recorded task's mm when
 * TRACE_ITER_SYM_USEROBJ is set.  Returns 0 once the seq buffer fills.
 */
static int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX marks the end of the recorded frames. */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			/* A zero frame means the address was unresolvable. */
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}
/*
 * Print a kernel instruction pointer as a symbol (with or without
 * offset depending on @sym_flags), optionally followed by the raw
 * address.  Returns 0 when the seq buffer filled up.
 */
int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count and lock depth.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	/* 'd' irqs off, 'X' unsupported, 'N' need-resched, h/s/H irq ctx. */
	if (!trace_seq_printf(s, "%c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	/* '.' stands for a preempt count of zero. */
	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	if (!ret)
		return 0;

	/* Negative lock depth means no locks are held. */
	if (entry->lock_depth < 0)
		return trace_seq_putc(s, '.');

	return trace_seq_printf(s, "%d", entry->lock_depth);
}
514 static int
515 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
517 char comm[TASK_COMM_LEN];
519 trace_find_cmdline(entry->pid, comm);
521 if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
522 comm, entry->pid, cpu))
523 return 0;
525 return trace_print_lat_fmt(s, entry);
/* Relative latency (usecs) above which the '!' attention marker is used. */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print " <abs>us<marker>: " where the marker flags how long until the
 * next event: '!' over the threshold, '+' over 1us, ' ' otherwise.
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
/*
 * Print the standard per-entry header: "comm-pid [cpu] secs.usecs: ".
 * Returns 0 when the seq buffer filled up.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	/* Split the nanosecond timestamp into whole seconds + microseconds. */
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}
/*
 * Print the latency-tracer style per-entry header.  In verbose mode a
 * fully detailed line is emitted; otherwise the generic prefix plus a
 * relative timestamp.  Returns 0 when the seq buffer filled up.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* With no following entry the relative time is defined as zero. */
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
/* One-letter code per task state; index 0 ('R') is the running state. */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/* Map a task state bitmask to its letter; '?' for unknown states. */
static int task_state_char(unsigned long state)
{
	/* Lowest set state bit selects the letter; 0 means running. */
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so masking yields the bucket. */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
/* Type-ordered list of dynamically numbered events. */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free event type number by walking the sorted list for the
 * first gap.  On success, *list is set to the node after which the new
 * event should be inserted and the free number is returned; 0 means
 * every number up to FTRACE_MAX_EVENT is taken.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	/*
	 * If the loop ran to completion without a gap, &e->list is the
	 * list head itself, so the insertion still lands at the tail.
	 */
	*list = &e->list;
	return last + 1;
}
/* Readers (output paths) take the event registry lock shared. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		/* Dynamic numbering: take the next number or reuse a gap. */
		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		/* Static types must come from the trace_type enum. */
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Fill any missing output methods with the no-op printer. */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
/*
 * Standard events
 */

/* Default output method: consume the entry without printing anything. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	/* Optionally show the caller as " <-symbol". */
	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	/* Seq buffer filled up; line must be retried after a flush. */
	return TRACE_TYPE_PARTIAL_LINE;
}
804 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
806 struct ftrace_entry *field;
808 trace_assign_type(field, iter->ent);
810 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
811 field->ip,
812 field->parent_ip))
813 return TRACE_TYPE_PARTIAL_LINE;
815 return TRACE_TYPE_HANDLED;
/* Hex output for TRACE_FN. */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The macros return TRACE_TYPE_PARTIAL_LINE on buffer overflow. */
	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

/* Binary output for TRACE_FN. */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
/* TRACE_CTX an TRACE_WAKE */
/*
 * Common printer for context-switch and wakeup events:
 * "prev_pid:prio:state <delim> [cpu] next_pid:prio:state comm".
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}
/*
 * Raw printer for context-switch/wakeup events.  @S overrides the
 * prev-state letter (used by the wakeup variant); 0 means derive it
 * from the recorded previous state.
 */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	/* Wakeups always report '+' as the previous state marker. */
	return trace_ctxwake_raw(iter, '+');
}
/* Hex printer for context-switch/wakeup events; @S as in the raw variant. */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
/* Binary printer shared by the context-switch and wakeup events. */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	int i;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;
	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		/* 0 and ULONG_MAX both terminate the recorded frames. */
		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
			break;
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, field->caller[i], flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	/* Expand the binary-printk record into text. */
	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};
/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
	.type	 	= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};
/* Built-in events registered at boot; the table is discarded afterwards. */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		/* register_ftrace_event() returns 0 on failure. */
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);