/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/cpu.h>
15 #include <linux/sysfs.h>
16 #include <linux/cpufreq.h>
17 #include <linux/module.h>
18 #include <linux/jiffies.h>
19 #include <linux/percpu.h>
20 #include <linux/kobject.h>
21 #include <linux/spinlock.h>
22 #include <linux/notifier.h>
23 #include <asm/cputime.h>
25 static spinlock_t cpufreq_stats_lock
;
27 struct cpufreq_stats
{
29 unsigned int total_trans
;
30 unsigned long long last_time
;
31 unsigned int max_state
;
32 unsigned int state_num
;
33 unsigned int last_index
;
35 unsigned int *freq_table
;
36 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
37 unsigned int *trans_table
;
41 static DEFINE_PER_CPU(struct cpufreq_stats
*, cpufreq_stats_table
);
43 struct cpufreq_stats_attribute
{
44 struct attribute attr
;
45 ssize_t(*show
) (struct cpufreq_stats
*, char *);
48 static int cpufreq_stats_update(unsigned int cpu
)
50 struct cpufreq_stats
*stat
;
51 unsigned long long cur_time
;
53 cur_time
= get_jiffies_64();
54 spin_lock(&cpufreq_stats_lock
);
55 stat
= per_cpu(cpufreq_stats_table
, cpu
);
56 if (stat
->time_in_state
)
57 stat
->time_in_state
[stat
->last_index
] +=
58 cur_time
- stat
->last_time
;
59 stat
->last_time
= cur_time
;
60 spin_unlock(&cpufreq_stats_lock
);
64 static ssize_t
show_total_trans(struct cpufreq_policy
*policy
, char *buf
)
66 struct cpufreq_stats
*stat
= per_cpu(cpufreq_stats_table
, policy
->cpu
);
69 return sprintf(buf
, "%d\n",
70 per_cpu(cpufreq_stats_table
, stat
->cpu
)->total_trans
);
73 static ssize_t
show_time_in_state(struct cpufreq_policy
*policy
, char *buf
)
77 struct cpufreq_stats
*stat
= per_cpu(cpufreq_stats_table
, policy
->cpu
);
80 cpufreq_stats_update(stat
->cpu
);
81 for (i
= 0; i
< stat
->state_num
; i
++) {
82 len
+= sprintf(buf
+ len
, "%u %llu\n", stat
->freq_table
[i
],
84 cputime64_to_clock_t(stat
->time_in_state
[i
]));
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/* sysfs show: the state_num x state_num transition-count matrix, with a
 * header row/column of frequencies.  Output is clamped to one PAGE_SIZE
 * buffer; on overflow PAGE_SIZE is returned to signal truncation. */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* Row-major: counts of switches from state i to j. */
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
136 cpufreq_freq_attr_ro(total_trans
);
137 cpufreq_freq_attr_ro(time_in_state
);
139 static struct attribute
*default_attrs
[] = {
142 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
147 static struct attribute_group stats_attr_group
= {
148 .attrs
= default_attrs
,
152 static int freq_table_get_index(struct cpufreq_stats
*stat
, unsigned int freq
)
155 for (index
= 0; index
< stat
->max_state
; index
++)
156 if (stat
->freq_table
[index
] == freq
)
161 /* should be called late in the CPU removal sequence so that the stats
162 * memory is still available in case someone tries to use it.
164 static void cpufreq_stats_free_table(unsigned int cpu
)
166 struct cpufreq_stats
*stat
= per_cpu(cpufreq_stats_table
, cpu
);
169 pr_debug("%s: Free stat table\n", __func__
);
170 kfree(stat
->time_in_state
);
172 per_cpu(cpufreq_stats_table
, cpu
) = NULL
;
176 /* must be called early in the CPU removal sequence (before
177 * cpufreq_remove_dev) so that policy is still valid.
179 static void cpufreq_stats_free_sysfs(unsigned int cpu
)
181 struct cpufreq_policy
*policy
= cpufreq_cpu_get(cpu
);
186 if (!cpufreq_frequency_get_table(cpu
))
189 if (!policy_is_shared(policy
)) {
190 pr_debug("%s: Free sysfs stat\n", __func__
);
191 sysfs_remove_group(&policy
->kobj
, &stats_attr_group
);
195 cpufreq_cpu_put(policy
);
198 static int cpufreq_stats_create_table(struct cpufreq_policy
*policy
,
199 struct cpufreq_frequency_table
*table
)
201 unsigned int i
, j
, count
= 0, ret
= 0;
202 struct cpufreq_stats
*stat
;
203 struct cpufreq_policy
*data
;
204 unsigned int alloc_size
;
205 unsigned int cpu
= policy
->cpu
;
206 if (per_cpu(cpufreq_stats_table
, cpu
))
208 stat
= kzalloc(sizeof(struct cpufreq_stats
), GFP_KERNEL
);
212 data
= cpufreq_cpu_get(cpu
);
218 ret
= sysfs_create_group(&data
->kobj
, &stats_attr_group
);
223 per_cpu(cpufreq_stats_table
, cpu
) = stat
;
225 for (i
= 0; table
[i
].frequency
!= CPUFREQ_TABLE_END
; i
++) {
226 unsigned int freq
= table
[i
].frequency
;
227 if (freq
== CPUFREQ_ENTRY_INVALID
)
232 alloc_size
= count
* sizeof(int) + count
* sizeof(u64
);
234 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
235 alloc_size
+= count
* count
* sizeof(int);
237 stat
->max_state
= count
;
238 stat
->time_in_state
= kzalloc(alloc_size
, GFP_KERNEL
);
239 if (!stat
->time_in_state
) {
243 stat
->freq_table
= (unsigned int *)(stat
->time_in_state
+ count
);
245 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
246 stat
->trans_table
= stat
->freq_table
+ count
;
249 for (i
= 0; table
[i
].frequency
!= CPUFREQ_TABLE_END
; i
++) {
250 unsigned int freq
= table
[i
].frequency
;
251 if (freq
== CPUFREQ_ENTRY_INVALID
)
253 if (freq_table_get_index(stat
, freq
) == -1)
254 stat
->freq_table
[j
++] = freq
;
257 spin_lock(&cpufreq_stats_lock
);
258 stat
->last_time
= get_jiffies_64();
259 stat
->last_index
= freq_table_get_index(stat
, policy
->cur
);
260 spin_unlock(&cpufreq_stats_lock
);
261 cpufreq_cpu_put(data
);
264 cpufreq_cpu_put(data
);
267 per_cpu(cpufreq_stats_table
, cpu
) = NULL
;
271 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy
*policy
)
273 struct cpufreq_stats
*stat
= per_cpu(cpufreq_stats_table
,
276 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
277 policy
->cpu
, policy
->last_cpu
);
278 per_cpu(cpufreq_stats_table
, policy
->cpu
) = per_cpu(cpufreq_stats_table
,
280 per_cpu(cpufreq_stats_table
, policy
->last_cpu
) = NULL
;
281 stat
->cpu
= policy
->cpu
;
284 static int cpufreq_stat_notifier_policy(struct notifier_block
*nb
,
285 unsigned long val
, void *data
)
288 struct cpufreq_policy
*policy
= data
;
289 struct cpufreq_frequency_table
*table
;
290 unsigned int cpu
= policy
->cpu
;
292 if (val
== CPUFREQ_UPDATE_POLICY_CPU
) {
293 cpufreq_stats_update_policy_cpu(policy
);
297 if (val
!= CPUFREQ_NOTIFY
)
299 table
= cpufreq_frequency_get_table(cpu
);
302 ret
= cpufreq_stats_create_table(policy
, table
);
308 static int cpufreq_stat_notifier_trans(struct notifier_block
*nb
,
309 unsigned long val
, void *data
)
311 struct cpufreq_freqs
*freq
= data
;
312 struct cpufreq_stats
*stat
;
313 int old_index
, new_index
;
315 if (val
!= CPUFREQ_POSTCHANGE
)
318 stat
= per_cpu(cpufreq_stats_table
, freq
->cpu
);
322 old_index
= stat
->last_index
;
323 new_index
= freq_table_get_index(stat
, freq
->new);
325 /* We can't do stat->time_in_state[-1]= .. */
326 if (old_index
== -1 || new_index
== -1)
329 cpufreq_stats_update(freq
->cpu
);
331 if (old_index
== new_index
)
334 spin_lock(&cpufreq_stats_lock
);
335 stat
->last_index
= new_index
;
336 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
337 stat
->trans_table
[old_index
* stat
->max_state
+ new_index
]++;
340 spin_unlock(&cpufreq_stats_lock
);
344 static int __cpuinit
cpufreq_stat_cpu_callback(struct notifier_block
*nfb
,
345 unsigned long action
,
348 unsigned int cpu
= (unsigned long)hcpu
;
352 case CPU_ONLINE_FROZEN
:
353 cpufreq_update_policy(cpu
);
355 case CPU_DOWN_PREPARE
:
356 cpufreq_stats_free_sysfs(cpu
);
359 cpufreq_stats_free_table(cpu
);
361 case CPU_UP_CANCELED_FROZEN
:
362 cpufreq_stats_free_sysfs(cpu
);
363 cpufreq_stats_free_table(cpu
);
369 /* priority=1 so this will get called before cpufreq_remove_dev */
370 static struct notifier_block cpufreq_stat_cpu_notifier __refdata
= {
371 .notifier_call
= cpufreq_stat_cpu_callback
,
375 static struct notifier_block notifier_policy_block
= {
376 .notifier_call
= cpufreq_stat_notifier_policy
379 static struct notifier_block notifier_trans_block
= {
380 .notifier_call
= cpufreq_stat_notifier_trans
383 static int __init
cpufreq_stats_init(void)
388 spin_lock_init(&cpufreq_stats_lock
);
389 ret
= cpufreq_register_notifier(¬ifier_policy_block
,
390 CPUFREQ_POLICY_NOTIFIER
);
394 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier
);
395 for_each_online_cpu(cpu
)
396 cpufreq_update_policy(cpu
);
398 ret
= cpufreq_register_notifier(¬ifier_trans_block
,
399 CPUFREQ_TRANSITION_NOTIFIER
);
401 cpufreq_unregister_notifier(¬ifier_policy_block
,
402 CPUFREQ_POLICY_NOTIFIER
);
403 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier
);
404 for_each_online_cpu(cpu
)
405 cpufreq_stats_free_table(cpu
);
411 static void __exit
cpufreq_stats_exit(void)
415 cpufreq_unregister_notifier(¬ifier_policy_block
,
416 CPUFREQ_POLICY_NOTIFIER
);
417 cpufreq_unregister_notifier(¬ifier_trans_block
,
418 CPUFREQ_TRANSITION_NOTIFIER
);
419 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier
);
420 for_each_online_cpu(cpu
) {
421 cpufreq_stats_free_table(cpu
);
422 cpufreq_stats_free_sysfs(cpu
);
426 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
427 MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
428 "through sysfs filesystem");
429 MODULE_LICENSE("GPL");
431 module_init(cpufreq_stats_init
);
432 module_exit(cpufreq_stats_exit
);