/*
 * kernel/cpu_acct.c - CPU accounting cgroup subsystem
 *
 * Copyright (C) Google Inc, 2006
 *
 * Developed by Paul Menage (menage@google.com) and Balbir Singh
 */

/*
 * Example cgroup subsystem for reporting total CPU usage of tasks in a
 * cgroup, along with percentage load over a time interval
 */

#include <linux/module.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>

#include <asm/div64.h>

struct cpuacct {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	/* total time used by this class */
	cputime64_t time;

	/* time when next load calculation occurs */
	u64 next_interval_check;

	/* time used in current period */
	cputime64_t current_interval_time;

	/* time used in last period */
	cputime64_t last_interval_time;
};

struct cgroup_subsys cpuacct_subsys;

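/* Map a cgroup to the cpuacct state embedded in its subsystem state */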
static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
			    struct cpuacct, css);
}

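/* Map a task to the cpuacct state of the cgroup it is attached to */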
static inline struct cpuacct *task_ca(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuacct_subsys_id),
			    struct cpuacct, css);
}

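/* Load is averaged over fixed windows of HZ * 10 jiffies, i.e. ten seconds */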
#define INTERVAL (HZ * 10)

static inline u64 next_interval_boundary(u64 now)
{
	/* calculate the next interval boundary beyond the
	 * current time */
	do_div(now, INTERVAL);
	return (now + 1) * INTERVAL;
}

static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

	if (!ca)
		return ERR_PTR(-ENOMEM);
	spin_lock_init(&ca->lock);
	ca->next_interval_check = next_interval_boundary(get_jiffies_64());
	return &ca->css;
}

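/* Free the per-cgroup accounting state when its cgroup is destroyed */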
static void cpuacct_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cont)
{
	kfree(cgroup_ca(cont));
}

/* Lazily update the load calculation if necessary. Called with ca locked */
static void cpuusage_update(struct cpuacct *ca)
{
	u64 now = get_jiffies_64();

	/* If we're not due for an update, return */
	if (ca->next_interval_check > now)
		return;

	if (ca->next_interval_check <= (now - INTERVAL)) {
		/* If it's been more than an interval since the last
		 * check, then catch up - the last interval must have
		 * been zero load */
		ca->last_interval_time = 0;
		ca->next_interval_check = next_interval_boundary(now);
	} else {
		/* If a steal takes the last interval time negative,
		 * then we just ignore it */
		if ((s64)ca->current_interval_time > 0)
			ca->last_interval_time = ca->current_interval_time;
		else
			ca->last_interval_time = 0;
		ca->next_interval_check += INTERVAL;
	}
	ca->current_interval_time = 0;
}

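/* Read handler reporting the total CPU time charged to this cgroup */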
static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cont);
	u64 time;

	spin_lock_irq(&ca->lock);
	cpuusage_update(ca);
	time = cputime64_to_jiffies64(ca->time);
	spin_unlock_irq(&ca->lock);

	/* Convert 64-bit jiffies to seconds */
	do_div(time, HZ);
	return time;
}

static u64 load_read(struct cgroup *cont, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cont);
	u64 time;

	/* Find the time used in the previous interval */
	spin_lock_irq(&ca->lock);
	cpuusage_update(ca);
	time = cputime64_to_jiffies64(ca->last_interval_time);
	spin_unlock_irq(&ca->lock);

	/* Convert time to a percentage, to give the load in the
	 * previous period */
	time *= 100;
	do_div(time, INTERVAL);

	return time;
}

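/* Control files exported to userspace by this subsystem */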
static struct cftype files[] = {
	{
		.name = "usage",
		.read_uint = cpuusage_read,
	},
	{
		.name = "load",
		.read_uint = load_read,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
}

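/*
 * Charge @cputime to the cpuacct group that @task belongs to.
 * Does nothing until the subsystem has been marked active.
 */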
void cpuacct_charge(struct task_struct *task, cputime_t cputime)
{
	struct cpuacct *ca;
	unsigned long flags;

	if (!cpuacct_subsys.active)
		return;

	rcu_read_lock();
	ca = task_ca(task);
	if (ca) {
		spin_lock_irqsave(&ca->lock, flags);
		cpuusage_update(ca);
		ca->time = cputime64_add(ca->time, cputime);
		ca->current_interval_time =
			cputime64_add(ca->current_interval_time, cputime);
		spin_unlock_irqrestore(&ca->lock, flags);
	}
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name = "cpuacct",
	.create = cpuacct_create,
	.destroy = cpuacct_destroy,
	.populate = cpuacct_populate,
	.subsys_id = cpuacct_subsys_id,
};