/*
 * Source: linux-2.6 (kmemtrace.git), kernel/cpu_acct.c
 * blob 731e47e7f164dd998a10beb6cd5333c388d90335
 * (tree snapshot at commit "[SPARC64]: Fix build with CONFIG_NET disabled.")
 */
/*
 * kernel/cpu_acct.c - CPU accounting cgroup subsystem
 *
 * Copyright (C) Google Inc, 2006
 *
 * Developed by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com)
 */

/*
 * Example cgroup subsystem for reporting total CPU usage of tasks in a
 * cgroup, along with percentage load over a time interval
 */
16 #include <linux/module.h>
17 #include <linux/cgroup.h>
18 #include <linux/fs.h>
19 #include <linux/rcupdate.h>
21 #include <asm/div64.h>
23 struct cpuacct {
24 struct cgroup_subsys_state css;
25 spinlock_t lock;
26 /* total time used by this class */
27 cputime64_t time;
29 /* time when next load calculation occurs */
30 u64 next_interval_check;
32 /* time used in current period */
33 cputime64_t current_interval_time;
35 /* time used in last period */
36 cputime64_t last_interval_time;
39 struct cgroup_subsys cpuacct_subsys;
41 static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
43 return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
44 struct cpuacct, css);
47 static inline struct cpuacct *task_ca(struct task_struct *task)
49 return container_of(task_subsys_state(task, cpuacct_subsys_id),
50 struct cpuacct, css);
53 #define INTERVAL (HZ * 10)
55 static inline u64 next_interval_boundary(u64 now)
57 /* calculate the next interval boundary beyond the
58 * current time */
59 do_div(now, INTERVAL);
60 return (now + 1) * INTERVAL;
63 static struct cgroup_subsys_state *cpuacct_create(
64 struct cgroup_subsys *ss, struct cgroup *cont)
66 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
68 if (!ca)
69 return ERR_PTR(-ENOMEM);
70 spin_lock_init(&ca->lock);
71 ca->next_interval_check = next_interval_boundary(get_jiffies_64());
72 return &ca->css;
/* Free the accounting state allocated by cpuacct_create(). */
static void cpuacct_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cont)
{
	kfree(cgroup_ca(cont));
}
81 /* Lazily update the load calculation if necessary. Called with ca locked */
82 static void cpuusage_update(struct cpuacct *ca)
84 u64 now = get_jiffies_64();
86 /* If we're not due for an update, return */
87 if (ca->next_interval_check > now)
88 return;
90 if (ca->next_interval_check <= (now - INTERVAL)) {
91 /* If it's been more than an interval since the last
92 * check, then catch up - the last interval must have
93 * been zero load */
94 ca->last_interval_time = 0;
95 ca->next_interval_check = next_interval_boundary(now);
96 } else {
97 /* If a steal takes the last interval time negative,
98 * then we just ignore it */
99 if ((s64)ca->current_interval_time > 0)
100 ca->last_interval_time = ca->current_interval_time;
101 else
102 ca->last_interval_time = 0;
103 ca->next_interval_check += INTERVAL;
105 ca->current_interval_time = 0;
108 static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
110 struct cpuacct *ca = cgroup_ca(cont);
111 u64 time;
113 spin_lock_irq(&ca->lock);
114 cpuusage_update(ca);
115 time = cputime64_to_jiffies64(ca->time);
116 spin_unlock_irq(&ca->lock);
118 /* Convert 64-bit jiffies to seconds */
119 time *= 1000;
120 do_div(time, HZ);
121 return time;
124 static u64 load_read(struct cgroup *cont, struct cftype *cft)
126 struct cpuacct *ca = cgroup_ca(cont);
127 u64 time;
129 /* Find the time used in the previous interval */
130 spin_lock_irq(&ca->lock);
131 cpuusage_update(ca);
132 time = cputime64_to_jiffies64(ca->last_interval_time);
133 spin_unlock_irq(&ca->lock);
135 /* Convert time to a percentage, to give the load in the
136 * previous period */
137 time *= 100;
138 do_div(time, INTERVAL);
140 return time;
143 static struct cftype files[] = {
145 .name = "usage",
146 .read_uint = cpuusage_read,
149 .name = "load",
150 .read_uint = load_read,
154 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
156 return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
159 void cpuacct_charge(struct task_struct *task, cputime_t cputime)
162 struct cpuacct *ca;
163 unsigned long flags;
165 if (!cpuacct_subsys.active)
166 return;
167 rcu_read_lock();
168 ca = task_ca(task);
169 if (ca) {
170 spin_lock_irqsave(&ca->lock, flags);
171 cpuusage_update(ca);
172 ca->time = cputime64_add(ca->time, cputime);
173 ca->current_interval_time =
174 cputime64_add(ca->current_interval_time, cputime);
175 spin_unlock_irqrestore(&ca->lock, flags);
177 rcu_read_unlock();
180 struct cgroup_subsys cpuacct_subsys = {
181 .name = "cpuacct",
182 .create = cpuacct_create,
183 .destroy = cpuacct_destroy,
184 .populate = cpuacct_populate,
185 .subsys_id = cpuacct_subsys_id,