Merge branch 'for-3.11' of git://linux-nfs.org/~bfields/linux
[linux-2.6.git] / include / linux / kernel_stat.h
blobed5f6ed6eb772797ea1c7eb0e98e46f55dbcf027
1 #ifndef _LINUX_KERNEL_STAT_H
2 #define _LINUX_KERNEL_STAT_H
4 #include <linux/smp.h>
5 #include <linux/threads.h>
6 #include <linux/percpu.h>
7 #include <linux/cpumask.h>
8 #include <linux/interrupt.h>
9 #include <linux/sched.h>
10 #include <linux/vtime.h>
11 #include <asm/irq.h>
12 #include <asm/cputime.h>
/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */
/*
 * Index into kernel_cpustat.cpustat[] for each class of CPU time the
 * kernel accounts.  NR_STATS is the number of buckets and must stay last.
 */
enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};
34 struct kernel_cpustat {
35 u64 cpustat[NR_STATS];
38 struct kernel_stat {
39 #ifndef CONFIG_GENERIC_HARDIRQS
40 unsigned int irqs[NR_IRQS];
41 #endif
42 unsigned long irqs_sum;
43 unsigned int softirqs[NR_SOFTIRQS];
/* Per-CPU instances of the statistics structures defined above. */
DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))

/* Remote-CPU accessors; no preemption requirement, but values may race. */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

/* Total context switches across all CPUs since boot. */
extern unsigned long long nr_context_switches(void);
57 #ifndef CONFIG_GENERIC_HARDIRQS
59 struct irq_desc;
61 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
62 struct irq_desc *desc)
64 __this_cpu_inc(kstat.irqs[irq]);
65 __this_cpu_inc(kstat.irqs_sum);
68 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
70 return kstat_cpu(cpu).irqs[irq];
72 #else
73 #include <linux/irq.h>
74 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
76 #define kstat_incr_irqs_this_cpu(irqno, DESC) \
77 do { \
78 __this_cpu_inc(*(DESC)->kstat_irqs); \
79 __this_cpu_inc(kstat.irqs_sum); \
80 } while (0)
82 #endif
84 static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
86 __this_cpu_inc(kstat.softirqs[irq]);
89 static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
91 return kstat_cpu(cpu).softirqs[irq];
95 * Number of interrupts per specific IRQ source, since bootup
97 #ifndef CONFIG_GENERIC_HARDIRQS
98 static inline unsigned int kstat_irqs(unsigned int irq)
100 unsigned int sum = 0;
101 int cpu;
103 for_each_possible_cpu(cpu)
104 sum += kstat_irqs_cpu(irq, cpu);
106 return sum;
108 #else
109 extern unsigned int kstat_irqs(unsigned int irq);
110 #endif
113 * Number of interrupts per cpu, since bootup
115 static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
117 return kstat_cpu(cpu).irqs_sum;
/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

/* Charge cputime to a task's user/system/steal/idle buckets. */
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With native vtime accounting, the periodic tick only needs to fold
 * accumulated user time; @user is ignored here (other time classes are
 * presumably accounted by the vtime hooks — see linux/vtime.h).
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_account_user(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif
/* Batch variants: account @ticks whole ticks of steal/idle time at once. */
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */