/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Register definitions (offsets from the XMU register base) */
#define XMU_DVFS_CTRL			0x0060
#define XMU_PMU_P0_7			0x0064
#define XMU_C0_3_PSTATE			0x0090
#define XMU_P_LIMIT			0x00a0
#define XMU_P_STATUS			0x00a4
#define XMU_PMUEVTEN			0x00d0
#define XMU_PMUIRQEN			0x00d4
#define XMU_PMUIRQ			0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK			0x7

#define XMU_DVFS_CTRL_EN_SHIFT		0

#define P0_7_CPUCLKDEV_SHIFT		21
#define P0_7_CPUCLKDEV_MASK		0x7
#define P0_7_ATBCLKDEV_SHIFT		18
#define P0_7_ATBCLKDEV_MASK		0x7
#define P0_7_CSCLKDEV_SHIFT		15
#define P0_7_CSCLKDEV_MASK		0x7
#define P0_7_CPUEMA_SHIFT		28
#define P0_7_CPUEMA_MASK		0xf
#define P0_7_L2EMA_SHIFT		24
#define P0_7_L2EMA_MASK			0xf
#define P0_7_VDD_SHIFT			8
#define P0_7_VDD_MASK			0x7f
#define P0_7_FREQ_SHIFT			0
#define P0_7_FREQ_MASK			0xff

#define C0_3_PSTATE_VALID_SHIFT		8
#define C0_3_PSTATE_CURR_SHIFT		4
#define C0_3_PSTATE_NEW_SHIFT		0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX		500
#define CPU_DBG_FREQ_MAX		375
#define CPU_ATB_FREQ_MAX		500

#define PMIC_LOW_VOLT			0x30
#define PMIC_HIGH_VOLT			0x28

#define CPUEMA_HIGH			0x2
#define CPUEMA_MID			0x4
#define CPUEMA_LOW			0x7

#define L2EMA_HIGH			0x1
#define L2EMA_MID			0x3
#define L2EMA_LOW			0x4

/* frequency unit is 20MHZ */
#define FREQ_UNIT			20

#define MAX_VOLTAGE			1550000 /* In microvolt */
#define VOLTAGE_STEP			12500	/* In microvolt */

#define CPUFREQ_NAME			"exynos5440_dvfs"
#define DEF_TRANS_LATENCY		100000
/*
 * Performance level indices. The P-state machinery supports at most
 * eight states (P0..P7), hence the table is capped at L7 + 1 entries.
 */
enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)
98 struct exynos_dvfs_data
{
100 struct resource
*mem
;
103 unsigned int cur_frequency
;
104 unsigned int latency
;
105 struct cpufreq_frequency_table
*freq_table
;
106 unsigned int freq_count
;
109 struct work_struct irq_work
;
112 static struct exynos_dvfs_data
*dvfs_info
;
113 static DEFINE_MUTEX(cpufreq_lock
);
114 static struct cpufreq_freqs freqs
;
116 static int init_div_table(void)
118 struct cpufreq_frequency_table
*freq_tbl
= dvfs_info
->freq_table
;
119 unsigned int tmp
, clk_div
, ema_div
, freq
, volt_id
;
121 struct dev_pm_opp
*opp
;
124 for (i
= 0; freq_tbl
[i
].frequency
!= CPUFREQ_TABLE_END
; i
++) {
126 opp
= dev_pm_opp_find_freq_exact(dvfs_info
->dev
,
127 freq_tbl
[i
].frequency
* 1000, true);
130 dev_err(dvfs_info
->dev
,
131 "failed to find valid OPP for %u KHZ\n",
132 freq_tbl
[i
].frequency
);
136 freq
= freq_tbl
[i
].frequency
/ 1000; /* In MHZ */
137 clk_div
= ((freq
/ CPU_DIV_FREQ_MAX
) & P0_7_CPUCLKDEV_MASK
)
138 << P0_7_CPUCLKDEV_SHIFT
;
139 clk_div
|= ((freq
/ CPU_ATB_FREQ_MAX
) & P0_7_ATBCLKDEV_MASK
)
140 << P0_7_ATBCLKDEV_SHIFT
;
141 clk_div
|= ((freq
/ CPU_DBG_FREQ_MAX
) & P0_7_CSCLKDEV_MASK
)
142 << P0_7_CSCLKDEV_SHIFT
;
145 volt_id
= dev_pm_opp_get_voltage(opp
);
146 volt_id
= (MAX_VOLTAGE
- volt_id
) / VOLTAGE_STEP
;
147 if (volt_id
< PMIC_HIGH_VOLT
) {
148 ema_div
= (CPUEMA_HIGH
<< P0_7_CPUEMA_SHIFT
) |
149 (L2EMA_HIGH
<< P0_7_L2EMA_SHIFT
);
150 } else if (volt_id
> PMIC_LOW_VOLT
) {
151 ema_div
= (CPUEMA_LOW
<< P0_7_CPUEMA_SHIFT
) |
152 (L2EMA_LOW
<< P0_7_L2EMA_SHIFT
);
154 ema_div
= (CPUEMA_MID
<< P0_7_CPUEMA_SHIFT
) |
155 (L2EMA_MID
<< P0_7_L2EMA_SHIFT
);
158 tmp
= (clk_div
| ema_div
| (volt_id
<< P0_7_VDD_SHIFT
)
159 | ((freq
/ FREQ_UNIT
) << P0_7_FREQ_SHIFT
));
161 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMU_P0_7
+ 4 * i
);
168 static void exynos_enable_dvfs(void)
170 unsigned int tmp
, i
, cpu
;
171 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
173 __raw_writel(0, dvfs_info
->base
+ XMU_DVFS_CTRL
);
175 /* Enable PSTATE Change Event */
176 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUEVTEN
);
177 tmp
|= (1 << PSTATE_CHANGED_EVTEN_SHIFT
);
178 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUEVTEN
);
180 /* Enable PSTATE Change IRQ */
181 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUIRQEN
);
182 tmp
|= (1 << PSTATE_CHANGED_IRQEN_SHIFT
);
183 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUIRQEN
);
185 /* Set initial performance index */
186 for (i
= 0; freq_table
[i
].frequency
!= CPUFREQ_TABLE_END
; i
++)
187 if (freq_table
[i
].frequency
== dvfs_info
->cur_frequency
)
190 if (freq_table
[i
].frequency
== CPUFREQ_TABLE_END
) {
191 dev_crit(dvfs_info
->dev
, "Boot up frequency not supported\n");
192 /* Assign the highest frequency */
194 dvfs_info
->cur_frequency
= freq_table
[i
].frequency
;
197 dev_info(dvfs_info
->dev
, "Setting dvfs initial frequency = %uKHZ",
198 dvfs_info
->cur_frequency
);
200 for (cpu
= 0; cpu
< CONFIG_NR_CPUS
; cpu
++) {
201 tmp
= __raw_readl(dvfs_info
->base
+ XMU_C0_3_PSTATE
+ cpu
* 4);
202 tmp
&= ~(P_VALUE_MASK
<< C0_3_PSTATE_NEW_SHIFT
);
203 tmp
|= (i
<< C0_3_PSTATE_NEW_SHIFT
);
204 __raw_writel(tmp
, dvfs_info
->base
+ XMU_C0_3_PSTATE
+ cpu
* 4);
208 __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT
,
209 dvfs_info
->base
+ XMU_DVFS_CTRL
);
212 static unsigned int exynos_getspeed(unsigned int cpu
)
214 return dvfs_info
->cur_frequency
;
217 static int exynos_target(struct cpufreq_policy
*policy
, unsigned int index
)
221 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
223 mutex_lock(&cpufreq_lock
);
225 freqs
.old
= dvfs_info
->cur_frequency
;
226 freqs
.new = freq_table
[index
].frequency
;
228 cpufreq_notify_transition(policy
, &freqs
, CPUFREQ_PRECHANGE
);
230 /* Set the target frequency in all C0_3_PSTATE register */
231 for_each_cpu(i
, policy
->cpus
) {
232 tmp
= __raw_readl(dvfs_info
->base
+ XMU_C0_3_PSTATE
+ i
* 4);
233 tmp
&= ~(P_VALUE_MASK
<< C0_3_PSTATE_NEW_SHIFT
);
234 tmp
|= (index
<< C0_3_PSTATE_NEW_SHIFT
);
236 __raw_writel(tmp
, dvfs_info
->base
+ XMU_C0_3_PSTATE
+ i
* 4);
238 mutex_unlock(&cpufreq_lock
);
242 static void exynos_cpufreq_work(struct work_struct
*work
)
244 unsigned int cur_pstate
, index
;
245 struct cpufreq_policy
*policy
= cpufreq_cpu_get(0); /* boot CPU */
246 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
248 /* Ensure we can access cpufreq structures */
249 if (unlikely(dvfs_info
->dvfs_enabled
== false))
252 mutex_lock(&cpufreq_lock
);
253 freqs
.old
= dvfs_info
->cur_frequency
;
255 cur_pstate
= __raw_readl(dvfs_info
->base
+ XMU_P_STATUS
);
256 if (cur_pstate
>> C0_3_PSTATE_VALID_SHIFT
& 0x1)
257 index
= (cur_pstate
>> C0_3_PSTATE_CURR_SHIFT
) & P_VALUE_MASK
;
259 index
= (cur_pstate
>> C0_3_PSTATE_NEW_SHIFT
) & P_VALUE_MASK
;
261 if (likely(index
< dvfs_info
->freq_count
)) {
262 freqs
.new = freq_table
[index
].frequency
;
263 dvfs_info
->cur_frequency
= freqs
.new;
265 dev_crit(dvfs_info
->dev
, "New frequency out of range\n");
266 freqs
.new = dvfs_info
->cur_frequency
;
268 cpufreq_notify_transition(policy
, &freqs
, CPUFREQ_POSTCHANGE
);
270 cpufreq_cpu_put(policy
);
271 mutex_unlock(&cpufreq_lock
);
273 enable_irq(dvfs_info
->irq
);
276 static irqreturn_t
exynos_cpufreq_irq(int irq
, void *id
)
280 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUIRQ
);
281 if (tmp
>> PSTATE_CHANGED_SHIFT
& 0x1) {
282 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUIRQ
);
283 disable_irq_nosync(irq
);
284 schedule_work(&dvfs_info
->irq_work
);
289 static void exynos_sort_descend_freq_table(void)
291 struct cpufreq_frequency_table
*freq_tbl
= dvfs_info
->freq_table
;
293 unsigned int tmp_freq
;
295 * Exynos5440 clock controller state logic expects the cpufreq table to
296 * be in descending order. But the OPP library constructs the table in
297 * ascending order. So to make the table descending we just need to
298 * swap the i element with the N - i element.
300 for (i
= 0; i
< dvfs_info
->freq_count
/ 2; i
++) {
301 index
= dvfs_info
->freq_count
- i
- 1;
302 tmp_freq
= freq_tbl
[i
].frequency
;
303 freq_tbl
[i
].frequency
= freq_tbl
[index
].frequency
;
304 freq_tbl
[index
].frequency
= tmp_freq
;
308 static int exynos_cpufreq_cpu_init(struct cpufreq_policy
*policy
)
310 return cpufreq_generic_init(policy
, dvfs_info
->freq_table
,
314 static struct cpufreq_driver exynos_driver
= {
315 .flags
= CPUFREQ_STICKY
| CPUFREQ_ASYNC_NOTIFICATION
,
316 .verify
= cpufreq_generic_frequency_table_verify
,
317 .target_index
= exynos_target
,
318 .get
= exynos_getspeed
,
319 .init
= exynos_cpufreq_cpu_init
,
320 .exit
= cpufreq_generic_exit
,
321 .name
= CPUFREQ_NAME
,
322 .attr
= cpufreq_generic_attr
,
325 static const struct of_device_id exynos_cpufreq_match
[] = {
327 .compatible
= "samsung,exynos5440-cpufreq",
331 MODULE_DEVICE_TABLE(of
, exynos_cpufreq_match
);
333 static int exynos_cpufreq_probe(struct platform_device
*pdev
)
336 struct device_node
*np
;
339 np
= pdev
->dev
.of_node
;
343 dvfs_info
= devm_kzalloc(&pdev
->dev
, sizeof(*dvfs_info
), GFP_KERNEL
);
349 dvfs_info
->dev
= &pdev
->dev
;
351 ret
= of_address_to_resource(np
, 0, &res
);
355 dvfs_info
->base
= devm_ioremap_resource(dvfs_info
->dev
, &res
);
356 if (IS_ERR(dvfs_info
->base
)) {
357 ret
= PTR_ERR(dvfs_info
->base
);
361 dvfs_info
->irq
= irq_of_parse_and_map(np
, 0);
362 if (!dvfs_info
->irq
) {
363 dev_err(dvfs_info
->dev
, "No cpufreq irq found\n");
368 ret
= of_init_opp_table(dvfs_info
->dev
);
370 dev_err(dvfs_info
->dev
, "failed to init OPP table: %d\n", ret
);
374 ret
= dev_pm_opp_init_cpufreq_table(dvfs_info
->dev
,
375 &dvfs_info
->freq_table
);
377 dev_err(dvfs_info
->dev
,
378 "failed to init cpufreq table: %d\n", ret
);
381 dvfs_info
->freq_count
= dev_pm_opp_get_opp_count(dvfs_info
->dev
);
382 exynos_sort_descend_freq_table();
384 if (of_property_read_u32(np
, "clock-latency", &dvfs_info
->latency
))
385 dvfs_info
->latency
= DEF_TRANS_LATENCY
;
387 dvfs_info
->cpu_clk
= devm_clk_get(dvfs_info
->dev
, "armclk");
388 if (IS_ERR(dvfs_info
->cpu_clk
)) {
389 dev_err(dvfs_info
->dev
, "Failed to get cpu clock\n");
390 ret
= PTR_ERR(dvfs_info
->cpu_clk
);
394 dvfs_info
->cur_frequency
= clk_get_rate(dvfs_info
->cpu_clk
);
395 if (!dvfs_info
->cur_frequency
) {
396 dev_err(dvfs_info
->dev
, "Failed to get clock rate\n");
400 dvfs_info
->cur_frequency
/= 1000;
402 INIT_WORK(&dvfs_info
->irq_work
, exynos_cpufreq_work
);
403 ret
= devm_request_irq(dvfs_info
->dev
, dvfs_info
->irq
,
404 exynos_cpufreq_irq
, IRQF_TRIGGER_NONE
,
405 CPUFREQ_NAME
, dvfs_info
);
407 dev_err(dvfs_info
->dev
, "Failed to register IRQ\n");
411 ret
= init_div_table();
413 dev_err(dvfs_info
->dev
, "Failed to initialise div table\n");
417 exynos_enable_dvfs();
418 ret
= cpufreq_register_driver(&exynos_driver
);
420 dev_err(dvfs_info
->dev
,
421 "%s: failed to register cpufreq driver\n", __func__
);
426 dvfs_info
->dvfs_enabled
= true;
430 dev_pm_opp_free_cpufreq_table(dvfs_info
->dev
, &dvfs_info
->freq_table
);
433 dev_err(&pdev
->dev
, "%s: failed initialization\n", __func__
);
437 static int exynos_cpufreq_remove(struct platform_device
*pdev
)
439 cpufreq_unregister_driver(&exynos_driver
);
440 dev_pm_opp_free_cpufreq_table(dvfs_info
->dev
, &dvfs_info
->freq_table
);
444 static struct platform_driver exynos_cpufreq_platdrv
= {
446 .name
= "exynos5440-cpufreq",
447 .owner
= THIS_MODULE
,
448 .of_match_table
= exynos_cpufreq_match
,
450 .probe
= exynos_cpufreq_probe
,
451 .remove
= exynos_cpufreq_remove
,
453 module_platform_driver(exynos_cpufreq_platdrv
);
455 MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
456 MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
457 MODULE_LICENSE("GPL");