Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
[linux-2.6.git] / drivers / cpufreq / exynos5440-cpufreq.c
blob0c74018eda47e1fd2d09ef0db1baaf8595a70547
1 /*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
7 * EXYNOS5440 - CPU frequency scaling support
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/clk.h>
15 #include <linux/cpu.h>
16 #include <linux/cpufreq.h>
17 #include <linux/err.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/opp.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
27 /* Register definitions */
/* Offsets below are relative to the XMU (DVFS controller) register base. */
28 #define XMU_DVFS_CTRL 0x0060
29 #define XMU_PMU_P0_7 0x0064
30 #define XMU_C0_3_PSTATE 0x0090
31 #define XMU_P_LIMIT 0x00a0
32 #define XMU_P_STATUS 0x00a4
33 #define XMU_PMUEVTEN 0x00d0
34 #define XMU_PMUIRQEN 0x00d4
35 #define XMU_PMUIRQ 0x00d8
37 /* PMU mask and shift definitions */
38 #define P_VALUE_MASK 0x7
40 #define XMU_DVFS_CTRL_EN_SHIFT 0
/* Field layout of each 32-bit XMU_PMU_P0_7 per-P-state slot. */
42 #define P0_7_CPUCLKDEV_SHIFT 21
43 #define P0_7_CPUCLKDEV_MASK 0x7
44 #define P0_7_ATBCLKDEV_SHIFT 18
45 #define P0_7_ATBCLKDEV_MASK 0x7
46 #define P0_7_CSCLKDEV_SHIFT 15
47 #define P0_7_CSCLKDEV_MASK 0x7
48 #define P0_7_CPUEMA_SHIFT 28
49 #define P0_7_CPUEMA_MASK 0xf
50 #define P0_7_L2EMA_SHIFT 24
51 #define P0_7_L2EMA_MASK 0xf
52 #define P0_7_VDD_SHIFT 8
53 #define P0_7_VDD_MASK 0x7f
54 #define P0_7_FREQ_SHIFT 0
55 #define P0_7_FREQ_MASK 0xff
/* Field layout of XMU_C0_3_PSTATE / XMU_P_STATUS. */
57 #define C0_3_PSTATE_VALID_SHIFT 8
58 #define C0_3_PSTATE_CURR_SHIFT 4
59 #define C0_3_PSTATE_NEW_SHIFT 0
61 #define PSTATE_CHANGED_EVTEN_SHIFT 0
63 #define PSTATE_CHANGED_IRQEN_SHIFT 0
65 #define PSTATE_CHANGED_SHIFT 0
67 /* some constant values for clock divider calculation */
/* Thresholds (MHz) used by init_div_table() to derive divider fields. */
68 #define CPU_DIV_FREQ_MAX 500
69 #define CPU_DBG_FREQ_MAX 375
70 #define CPU_ATB_FREQ_MAX 500
/*
 * PMIC step indices, counted DOWN from MAX_VOLTAGE in VOLTAGE_STEP units,
 * so the LOW-voltage index (0x30) is numerically larger than the
 * HIGH-voltage index (0x28) — see the volt_id math in init_div_table().
 */
72 #define PMIC_LOW_VOLT 0x30
73 #define PMIC_HIGH_VOLT 0x28
/* EMA (memory cell margin) codes written into the P0_7 EMA fields. */
75 #define CPUEMA_HIGH 0x2
76 #define CPUEMA_MID 0x4
77 #define CPUEMA_LOW 0x7
79 #define L2EMA_HIGH 0x1
80 #define L2EMA_MID 0x3
81 #define L2EMA_LOW 0x4
83 #define DIV_TAB_MAX 2
84 /* frequency unit is 20MHZ */
85 #define FREQ_UNIT 20
86 #define MAX_VOLTAGE 1550000 /* In microvolt */
87 #define VOLTAGE_STEP 12500 /* In microvolt */
89 #define CPUFREQ_NAME "exynos5440_dvfs"
90 #define DEF_TRANS_LATENCY 100000 /* default transition latency, ns */
/* Logical performance-level indices for the hardware P-state slots. */
92 enum cpufreq_level_index {
93 L0, L1, L2, L3, L4,
94 L5, L6, L7, L8, L9,
/*
 * NOTE(review): only L0..L7 (8 levels) are counted as usable even though
 * the enum declares up to L9 — presumably matching the 8 P0_7 hardware
 * slots; confirm against the XMU documentation.
 */
96 #define CPUFREQ_LEVEL_END (L7 + 1)
/* Driver state; allocated once in probe and shared by every callback. */
98 struct exynos_dvfs_data {
99 void __iomem *base; /* mapped XMU register window */
100 struct resource *mem;
101 int irq; /* PSTATE-changed interrupt */
102 struct clk *cpu_clk; /* "armclk", used only to read the boot rate */
103 unsigned int cur_frequency; /* cached current frequency, kHz */
104 unsigned int latency; /* transition latency from DT, ns */
105 struct cpufreq_frequency_table *freq_table; /* descending order */
106 unsigned int freq_count;
107 struct device *dev;
108 bool dvfs_enabled; /* set true only after successful probe */
109 struct work_struct irq_work; /* bottom half of the PSTATE IRQ */
/* Single-instance driver: one controller serves all CPUs. */
112 static struct exynos_dvfs_data *dvfs_info;
113 static DEFINE_MUTEX(cpufreq_lock); /* serializes freqs/cur_frequency */
114 static struct cpufreq_freqs freqs; /* shared PRE/POSTCHANGE payload */
/*
 * init_div_table() - program the per-P-state divider/EMA/voltage table.
 *
 * For every entry in the cpufreq frequency table, look up the exact
 * matching OPP, derive the CPU/ATB/CS clock-divider fields, pick EMA
 * codes from the derived PMIC voltage step index, and pack everything
 * into one 32-bit XMU_PMU_P0_7 slot per table index.
 *
 * Returns 0 on success, or PTR_ERR() of a failed OPP lookup (the whole
 * table programming is abandoned in that case).
 *
 * Holds rcu_read_lock() across the loop as required by the OPP library
 * accessors (opp_find_freq_exact()/opp_get_voltage()).
 */
116 static int init_div_table(void)
118 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
119 unsigned int tmp, clk_div, ema_div, freq, volt_id;
120 int i = 0;
121 struct opp *opp;
123 rcu_read_lock();
124 for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
/* Table stores kHz; OPPs are keyed in Hz, hence the * 1000. */
126 opp = opp_find_freq_exact(dvfs_info->dev,
127 freq_tbl[i].frequency * 1000, true);
128 if (IS_ERR(opp)) {
129 rcu_read_unlock();
130 dev_err(dvfs_info->dev,
131 "failed to find valid OPP for %u KHZ\n",
132 freq_tbl[i].frequency);
133 return PTR_ERR(opp);
136 freq = freq_tbl[i].frequency / 1000; /* In MHZ */
/* Integer division against the *_FREQ_MAX thresholds yields divider codes. */
137 clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
138 << P0_7_CPUCLKDEV_SHIFT;
139 clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
140 << P0_7_ATBCLKDEV_SHIFT;
141 clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
142 << P0_7_CSCLKDEV_SHIFT;
144 /* Calculate EMA */
/* volt_id counts VOLTAGE_STEP decrements below MAX_VOLTAGE: small id = high voltage. */
145 volt_id = opp_get_voltage(opp);
146 volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
147 if (volt_id < PMIC_HIGH_VOLT) {
148 ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
149 (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
150 } else if (volt_id > PMIC_LOW_VOLT) {
151 ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
152 (L2EMA_LOW << P0_7_L2EMA_SHIFT);
153 } else {
154 ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
155 (L2EMA_MID << P0_7_L2EMA_SHIFT);
/* Pack dividers, EMA, voltage id and frequency (20 MHz units) into one slot. */
158 tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
159 | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
161 __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
164 rcu_read_unlock();
165 return 0;
/*
 * exynos_enable_dvfs() - arm the hardware DVFS state machine.
 *
 * Sequence: disable DVFS, enable the PSTATE-changed event and IRQ,
 * seed every CPU's "new P-state" field with the index of the current
 * boot frequency, then re-enable DVFS. If the boot frequency is not in
 * the table, index 0 is used — the highest frequency, since
 * exynos_sort_descend_freq_table() has sorted the table descending.
 */
168 static void exynos_enable_dvfs(void)
170 unsigned int tmp, i, cpu;
171 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
172 /* Disable DVFS */
173 __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
175 /* Enable PSTATE Change Event */
176 tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
177 tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
178 __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
180 /* Enable PSTATE Change IRQ */
181 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
182 tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
183 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
185 /* Set initial performance index */
186 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
187 if (freq_table[i].frequency == dvfs_info->cur_frequency)
188 break;
190 if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
191 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
192 /* Assign the highest frequency */
193 i = 0;
194 dvfs_info->cur_frequency = freq_table[i].frequency;
197 dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
198 dvfs_info->cur_frequency);
/* One XMU_C0_3_PSTATE register per CPU, 4 bytes apart. */
200 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
201 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
202 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
203 tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
204 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
207 /* Enable DVFS */
208 __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
209 dvfs_info->base + XMU_DVFS_CTRL);
/* cpufreq ->verify hook: clamp policy min/max to the frequency table. */
212 static int exynos_verify_speed(struct cpufreq_policy *policy)
214 return cpufreq_frequency_table_verify(policy,
215 dvfs_info->freq_table);
/*
 * cpufreq ->get hook: return the cached frequency (kHz).
 * All CPUs share one clock domain, so @cpu is ignored.
 */
218 static unsigned int exynos_getspeed(unsigned int cpu)
220 return dvfs_info->cur_frequency;
/*
 * cpufreq ->target hook: request a frequency change from the hardware.
 *
 * Maps @target_freq to a table index, issues CPUFREQ_PRECHANGE, then
 * writes the index into each CPU's C0_3_PSTATE "new" field. The actual
 * transition is asynchronous: CPUFREQ_POSTCHANGE is sent later from
 * exynos_cpufreq_work() when the PSTATE-changed IRQ fires.
 *
 * Returns 0 on success or the cpufreq table-lookup error.
 * Serialized against the IRQ work by cpufreq_lock.
 */
223 static int exynos_target(struct cpufreq_policy *policy,
224 unsigned int target_freq,
225 unsigned int relation)
227 unsigned int index, tmp;
228 int ret = 0, i;
229 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
231 mutex_lock(&cpufreq_lock);
233 ret = cpufreq_frequency_table_target(policy, freq_table,
234 target_freq, relation, &index);
235 if (ret)
236 goto out;
238 freqs.old = dvfs_info->cur_frequency;
239 freqs.new = freq_table[index].frequency;
241 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
243 /* Set the target frequency in all C0_3_PSTATE register */
244 for_each_cpu(i, policy->cpus) {
245 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
246 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
247 tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
249 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
251 out:
252 mutex_unlock(&cpufreq_lock);
253 return ret;
/*
 * exynos_cpufreq_work() - bottom half of the PSTATE-changed interrupt.
 *
 * Reads XMU_P_STATUS to learn which P-state the hardware settled on
 * (the CURR field when the VALID bit is set, otherwise the NEW field),
 * updates the cached frequency and sends CPUFREQ_POSTCHANGE to
 * complete the transition started by exynos_target(). Finally
 * re-enables the IRQ that exynos_cpufreq_irq() disabled.
 *
 * Skips all cpufreq interaction until probe has finished
 * (dvfs_enabled), since the IRQ may fire before registration.
 */
256 static void exynos_cpufreq_work(struct work_struct *work)
258 unsigned int cur_pstate, index;
259 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
260 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
262 /* Ensure we can access cpufreq structures */
263 if (unlikely(dvfs_info->dvfs_enabled == false))
264 goto skip_work;
266 mutex_lock(&cpufreq_lock);
267 freqs.old = dvfs_info->cur_frequency;
269 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
270 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
271 index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
272 else
273 index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
/* Out-of-range index: keep the old frequency rather than trusting hardware. */
275 if (likely(index < dvfs_info->freq_count)) {
276 freqs.new = freq_table[index].frequency;
277 dvfs_info->cur_frequency = freqs.new;
278 } else {
279 dev_crit(dvfs_info->dev, "New frequency out of range\n");
280 freqs.new = dvfs_info->cur_frequency;
282 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
284 cpufreq_cpu_put(policy);
285 mutex_unlock(&cpufreq_lock);
286 skip_work:
287 enable_irq(dvfs_info->irq);
/*
 * exynos_cpufreq_irq() - PSTATE-changed interrupt handler.
 *
 * Acknowledges the PSTATE_CHANGED bit by writing it back to XMU_PMUIRQ,
 * masks the line (re-enabled at the end of exynos_cpufreq_work()) and
 * defers the cpufreq notification to process context via irq_work.
 */
290 static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
292 unsigned int tmp;
294 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
295 if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
296 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
297 disable_irq_nosync(irq);
298 schedule_work(&dvfs_info->irq_work);
300 return IRQ_HANDLED;
/* In-place reversal of the OPP-built (ascending) frequency table. */
303 static void exynos_sort_descend_freq_table(void)
305 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
306 int i = 0, index;
307 unsigned int tmp_freq;
/*
309 * Exynos5440 clock controller state logic expects the cpufreq table to
310 * be in descending order. But the OPP library constructs the table in
311 * ascending order. So to make the table descending we just need to
312 * swap the i element with the N - i element.
 */
314 for (i = 0; i < dvfs_info->freq_count / 2; i++) {
315 index = dvfs_info->freq_count - i - 1;
316 tmp_freq = freq_tbl[i].frequency;
317 freq_tbl[i].frequency = freq_tbl[index].frequency;
318 freq_tbl[index].frequency = tmp_freq;
/*
 * cpufreq ->init hook: validate the table, publish latency and current
 * frequency, and claim all CPUs for this policy (single clock domain).
 * Returns 0 on success or the table-validation error.
 */
322 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
324 int ret;
326 ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
327 if (ret) {
328 dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
329 return ret;
332 policy->cur = dvfs_info->cur_frequency;
333 policy->cpuinfo.transition_latency = dvfs_info->latency;
/* All CPUs share the one DVFS controller, so one policy covers them all. */
334 cpumask_setall(policy->cpus);
336 cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
338 return 0;
/* cpufreq driver operations; registered from exynos_cpufreq_probe(). */
341 static struct cpufreq_driver exynos_driver = {
342 .flags = CPUFREQ_STICKY,
343 .verify = exynos_verify_speed,
344 .target = exynos_target,
345 .get = exynos_getspeed,
346 .init = exynos_cpufreq_cpu_init,
347 .name = CPUFREQ_NAME,
/* Device-tree match table; also exported for module autoloading. */
350 static const struct of_device_id exynos_cpufreq_match[] = {
352 .compatible = "samsung,exynos5440-cpufreq",
356 MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
358 static int exynos_cpufreq_probe(struct platform_device *pdev)
360 int ret = -EINVAL;
361 struct device_node *np;
362 struct resource res;
364 np = pdev->dev.of_node;
365 if (!np)
366 return -ENODEV;
368 dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
369 if (!dvfs_info) {
370 ret = -ENOMEM;
371 goto err_put_node;
374 dvfs_info->dev = &pdev->dev;
376 ret = of_address_to_resource(np, 0, &res);
377 if (ret)
378 goto err_put_node;
380 dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
381 if (IS_ERR(dvfs_info->base)) {
382 ret = PTR_ERR(dvfs_info->base);
383 goto err_put_node;
386 dvfs_info->irq = irq_of_parse_and_map(np, 0);
387 if (!dvfs_info->irq) {
388 dev_err(dvfs_info->dev, "No cpufreq irq found\n");
389 ret = -ENODEV;
390 goto err_put_node;
393 ret = of_init_opp_table(dvfs_info->dev);
394 if (ret) {
395 dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
396 goto err_put_node;
399 ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
400 if (ret) {
401 dev_err(dvfs_info->dev,
402 "failed to init cpufreq table: %d\n", ret);
403 goto err_put_node;
405 dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
406 exynos_sort_descend_freq_table();
408 if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
409 dvfs_info->latency = DEF_TRANS_LATENCY;
411 dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
412 if (IS_ERR(dvfs_info->cpu_clk)) {
413 dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
414 ret = PTR_ERR(dvfs_info->cpu_clk);
415 goto err_free_table;
418 dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
419 if (!dvfs_info->cur_frequency) {
420 dev_err(dvfs_info->dev, "Failed to get clock rate\n");
421 ret = -EINVAL;
422 goto err_free_table;
424 dvfs_info->cur_frequency /= 1000;
426 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
427 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
428 exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
429 CPUFREQ_NAME, dvfs_info);
430 if (ret) {
431 dev_err(dvfs_info->dev, "Failed to register IRQ\n");
432 goto err_free_table;
435 ret = init_div_table();
436 if (ret) {
437 dev_err(dvfs_info->dev, "Failed to initialise div table\n");
438 goto err_free_table;
441 exynos_enable_dvfs();
442 ret = cpufreq_register_driver(&exynos_driver);
443 if (ret) {
444 dev_err(dvfs_info->dev,
445 "%s: failed to register cpufreq driver\n", __func__);
446 goto err_free_table;
449 of_node_put(np);
450 dvfs_info->dvfs_enabled = true;
451 return 0;
453 err_free_table:
454 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
455 err_put_node:
456 of_node_put(np);
457 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
458 return ret;
/*
 * Platform-driver remove: unregister from cpufreq and free the table
 * built by opp_init_cpufreq_table(). Register mapping, clock and IRQ
 * are devm-managed and released automatically.
 */
461 static int exynos_cpufreq_remove(struct platform_device *pdev)
463 cpufreq_unregister_driver(&exynos_driver);
464 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
465 return 0;
/* Platform driver glue; bound by the DT compatible string above. */
468 static struct platform_driver exynos_cpufreq_platdrv = {
469 .driver = {
470 .name = "exynos5440-cpufreq",
471 .owner = THIS_MODULE,
472 .of_match_table = exynos_cpufreq_match,
474 .probe = exynos_cpufreq_probe,
475 .remove = exynos_cpufreq_remove,
477 module_platform_driver(exynos_cpufreq_platdrv);
479 MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
480 MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
481 MODULE_LICENSE("GPL");