/* Slow work debugging
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include "slow-work.h"

#define ITERATOR_SHIFT		(BITS_PER_LONG - 4)
#define ITERATOR_SELECTOR	(0xfUL << ITERATOR_SHIFT)
#define ITERATOR_COUNTER	(~ITERATOR_SELECTOR)
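
/*
 * Note on the iterator encoding used by the functions below: the top four
 * bits of the seq_file position select which phase of the dump we are in
 * (0 = header lines and the currently-executing thread slots, 1 = the
 * ordinary slow work queue, 2 = the very slow work queue, 3 = end of file),
 * and the remaining bits count items within that phase.  This lets a single
 * loff_t walk several separate data structures in sequence.
 */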

void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
{
	seq_puts(m, "Slow-work: New thread");
}

/*
 * Render the time mark field on a work item into a 5-char time with units plus
 * a space
 */
static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
{
	struct timespec now, diff;

	now = CURRENT_TIME;
	diff = timespec_sub(now, work->mark);

	if (diff.tv_sec < 0)
		seq_puts(m, "  -ve ");
	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
		seq_printf(m, "%3luns ", diff.tv_nsec);
	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
		seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
		seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
	else if (diff.tv_sec <= 1)
		seq_puts(m, "   1s ");
	else if (diff.tv_sec < 60)
		seq_printf(m, "%4lus ", diff.tv_sec);
	else if (diff.tv_sec < 60 * 60)
		seq_printf(m, "%4lum ", diff.tv_sec / 60);
	else if (diff.tv_sec < 60 * 60 * 24)
		seq_printf(m, "%4luh ", diff.tv_sec / 3600);
	else
		seq_puts(m, "exces ");
}
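
/*
 * The mark column emitted above is always five characters plus a trailing
 * space, e.g. "812ns ", " 47us ", "  3ms ", "   1s ", "  12s ", "   5m ",
 * "  17h ", or "exces " for anything older than a day, so the per-item
 * lines in the runqueue listing stay aligned.
 */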

/*
 * Describe a slow work item for debugfs
 */
static int slow_work_runqueue_show(struct seq_file *m, void *v)
{
	struct slow_work *work;
	struct list_head *p = v;
	unsigned long id;

	switch ((unsigned long) v) {
	case 1:
		seq_puts(m, "THR PID   ITEM ADDR        FL MARK  DESC\n");
		return 0;
	case 2:
		seq_puts(m, "=== ===== ================ == ===== ==========\n");
		return 0;

	case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
		id = (unsigned long) v - 3;

		read_lock(&slow_work_execs_lock);
		work = slow_work_execs[id];
		if (work) {
			smp_read_barrier_depends();

			seq_printf(m, "%3lu %5d %16p %2lx ",
				   id, slow_work_pids[id], work, work->flags);
			slow_work_print_mark(m, work);

			if (work->ops->desc)
				work->ops->desc(work, m);
			seq_putc(m, '\n');
		}
		read_unlock(&slow_work_execs_lock);
		return 0;

	default:
		work = list_entry(p, struct slow_work, link);
		seq_printf(m, "%3s - %16p %2lx ",
			   work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
			   work, work->flags);
		slow_work_print_mark(m, work);

		if (work->ops->desc)
			work->ops->desc(work, m);
		seq_putc(m, '\n');
		return 0;
	}
}
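
/*
 * Each call of ->show() above emits at most one line of the runqueue file:
 * a header or separator, one slot from the currently-executing array
 * (thread id, pid, item address, flags, age, description), or one queued
 * item tagged "sq" or "vsq" in the thread column depending on which queue
 * it sits on.
 */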

/*
 * map the iterator to a work item
 */
static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
{
	struct list_head *p;
	unsigned long count, id;

	switch (*_pos >> ITERATOR_SHIFT) {
	case 0x0:
		if (*_pos == 0)
			*_pos = 1;
		if (*_pos < 3)
			return (void *)(unsigned long) *_pos;
		if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
			for (id = *_pos - 3;
			     id < SLOW_WORK_THREAD_LIMIT;
			     id++, (*_pos)++)
				if (slow_work_execs[id])
					return (void *)(unsigned long) *_pos;
		*_pos = 0x1UL << ITERATOR_SHIFT;

	case 0x1:
		count = *_pos & ITERATOR_COUNTER;
		list_for_each(p, &slow_work_queue) {
			if (count == 0)
				return p;
			count--;
		}
		*_pos = 0x2UL << ITERATOR_SHIFT;

	case 0x2:
		count = *_pos & ITERATOR_COUNTER;
		list_for_each(p, &vslow_work_queue) {
			if (count == 0)
				return p;
			count--;
		}
		*_pos = 0x3UL << ITERATOR_SHIFT;

	default:
		return NULL;
	}
}
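
/*
 * The switch cases above fall through deliberately: once one phase of the
 * dump is exhausted, *_pos is bumped into the next selector range and the
 * search continues in the following queue rather than returning NULL early.
 */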

/*
 * set up the iterator to start reading from the first line
 */
static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
{
	spin_lock_irq(&slow_work_queue_lock);
	return slow_work_runqueue_index(m, _pos);
}

/*
 * move to the next line
 */
static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
{
	struct list_head *p = v;
	unsigned long selector = *_pos >> ITERATOR_SHIFT;

	(*_pos)++;
	switch (selector) {
	case 0x0:
		return slow_work_runqueue_index(m, _pos);

	case 0x1:
		if (*_pos >> ITERATOR_SHIFT == 0x1) {
			p = p->next;
			if (p != &slow_work_queue)
				return p;
		}
		*_pos = 0x2UL << ITERATOR_SHIFT;
		p = &vslow_work_queue;

	case 0x2:
		if (*_pos >> ITERATOR_SHIFT == 0x2) {
			p = p->next;
			if (p != &vslow_work_queue)
				return p;
		}
		*_pos = 0x3UL << ITERATOR_SHIFT;

	default:
		return NULL;
	}
}

/*
 * clean up after reading
 */
static void slow_work_runqueue_stop(struct seq_file *m, void *v)
{
	spin_unlock_irq(&slow_work_queue_lock);
}
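
/*
 * The seq_file core brackets each batch of ->show()/->next() calls with
 * ->start() and ->stop() above, so slow_work_queue_lock is held with
 * interrupts disabled while the queue lists are walked and formatted, and
 * the queues cannot change underneath the listing.
 */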

static const struct seq_operations slow_work_runqueue_ops = {
	.start		= slow_work_runqueue_start,
	.stop		= slow_work_runqueue_stop,
	.next		= slow_work_runqueue_next,
	.show		= slow_work_runqueue_show,
};

/*
 * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
 */
static int slow_work_runqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slow_work_runqueue_ops);
}

const struct file_operations slow_work_runqueue_fops = {
	.owner		= THIS_MODULE,
	.open		= slow_work_runqueue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
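
/*
 * slow_work_runqueue_fops is consumed by the slow-work core, which creates
 * the "/sys/kernel/debug/slow_work/runqueue" file mentioned above.  A
 * minimal sketch of such a registration (assuming a "slow_work" debugfs
 * directory and ignoring error handling; not necessarily the exact code in
 * kernel/slow-work.c) would look something like:
 *
 *	struct dentry *dir = debugfs_create_dir("slow_work", NULL);
 *	if (!IS_ERR_OR_NULL(dir))
 *		debugfs_create_file("runqueue", S_IFREG | 0400, dir,
 *				    NULL, &slow_work_runqueue_fops);
 */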