/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/err.h>
15 #include <linux/clk.h>
17 #include <linux/slab.h>
18 #include <linux/regulator/consumer.h>
19 #include <linux/cpufreq.h>
20 #include <linux/notifier.h>
21 #include <linux/suspend.h>
24 #include <mach/regs-clock.h>
25 #include <mach/regs-mem.h>
27 #include <plat/clock.h>
/* Clocks involved in ARM-core DVFS: armclk plus the core/MPLL/APLL muxes. */
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;

/* Supplies scaled together with frequency (uV values from the volt table). */
static struct regulator *arm_regulator;
static struct regulator *int_regulator;

/* In-flight transition data handed to the cpufreq notifier chain. */
static struct cpufreq_freqs freqs;
/* DRAM type read from the DMC at init (DVFS behaviour depends on it). */
static unsigned int memtype;

/* Boot-time frequency; the only target accepted while frequency_locked. */
static unsigned int locking_frequency;
/* Set around suspend/resume to block normal DVFS activity. */
static bool frequency_locked;
/* Serializes frequency transitions and the lock flag above. */
static DEFINE_MUTEX(cpufreq_lock);
/*
 * DRAM types decoded from the DMC MEMCON register.
 * NOTE(review): the enumerator list is not visible in this view; DDR2 and
 * DDR3 are referenced by the init-time memtype check below — confirm the
 * full member list (and ordering) against the complete file.
 */
enum exynos4_memory_type {

/* Operating-point indices; L0 is the fastest level (1000MHz APLL setting). */
enum cpufreq_level_index {
	L0, L1, L2, L3, CPUFREQ_LEVEL_END,
/*
 * cpufreq frequency table (kHz), terminated by CPUFREQ_TABLE_END.
 * NOTE(review): the per-level entries are missing from this view.
 */
static struct cpufreq_frequency_table exynos4_freq_table[] = {
	{0, CPUFREQ_TABLE_END},
/* CLKDIV_CPU0 divider settings, one row per DVFS level. */
static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
	/*
	 * Clock divider value for following
	 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
	 *   DIVATB, DIVPCLK_DBG, DIVAPLL }
	 */
	/* ARM L0: 1000MHz */
	{ 0, 3, 7, 3, 3, 0, 1 },
	/* ARM L1: 800MHz */
	{ 0, 3, 7, 3, 3, 0, 1 },
	/* ARM L2: 400MHz */
	{ 0, 1, 3, 1, 3, 0, 1 },
	/* ARM L3: 100MHz */
	{ 0, 0, 1, 0, 3, 1, 1 },
/*
 * CLKDIV_CPU1 divider settings, one row of two dividers per DVFS level.
 * NOTE(review): the field list and the per-level rows are missing from
 * this view; the values are applied at bit positions 4 and 0 of
 * S5P_CLKDIV_CPU1 by exynos4_set_clkdiv().
 */
static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
	/*
	 * Clock divider value for following
	 */
/* CLKDIV_DMC0 divider settings (memory controller), one row per level. */
static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
	/*
	 * Clock divider value for following
	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
	 *   DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
	 */
	/* DMC L0: 400MHz */
	{ 3, 1, 1, 1, 1, 1, 3, 1 },
	/* DMC L1: 400MHz */
	{ 3, 1, 1, 1, 1, 1, 3, 1 },
	/* DMC L2: 266.7MHz */
	{ 7, 1, 1, 2, 1, 1, 3, 1 },
	/* DMC L3: 200MHz */
	{ 7, 1, 1, 3, 1, 1, 3, 1 },
/*
 * CLKDIV_TOP divider settings, one row per DVFS level.
 * NOTE(review): the per-level rows are missing from this view; only the
 * descriptive comments survive.
 */
static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
	/*
	 * Clock divider value for following
	 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
	 */
	/* ACLK200 L0: 200MHz */
	/* ACLK200 L1: 200MHz */
	/* ACLK200 L2: 160MHz */
	/* ACLK200 L3: 133.3MHz */
/*
 * LEFTBUS/RIGHTBUS divider settings (applied to both buses), one row per
 * DVFS level.
 * NOTE(review): the per-level rows are missing from this view; only the
 * descriptive comments survive.
 */
static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
	/*
	 * Clock divider value for following
	 * { DIVGDL/R, DIVGPL/R }
	 */
	/* ACLK_GDL/R L0: 200MHz */
	/* ACLK_GDL/R L1: 200MHz */
	/* ACLK_GDL/R L2: 160MHz */
	/* ACLK_GDL/R L3: 133.3MHz */
/* Per-level supply voltages applied through the regulator framework. */
struct cpufreq_voltage_table {
	unsigned int index;	/* any */
	unsigned int arm_volt;	/* uV */
	unsigned int int_volt;	/* uV (passed straight to regulator_set_voltage) */
/*
 * Voltage pairs indexed by DVFS level, consumed in exynos4_target().
 * NOTE(review): the per-level entries are missing from this view.
 */
static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
/*
 * APLL P/M/S settings per DVFS level, packed as (M << 16) | (P << 8) | S.
 * With the typical 24MHz crystal (assumption — confirm on the board),
 * FOUT = (M * 24MHz) / (P * 2^(S-1)) yields the rates noted below.
 */
static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
	/* APLL FOUT L0: 1000MHz */
	((250 << 16) | (6 << 8) | 1),

	/* APLL FOUT L1: 800MHz */
	((200 << 16) | (6 << 8) | 1),

	/* APLL FOUT L2: 400MHz */
	((200 << 16) | (6 << 8) | 2),

	/* APLL FOUT L3: 100MHz */
	((200 << 16) | (6 << 8) | 4),
200 static int exynos4_verify_speed(struct cpufreq_policy
*policy
)
202 return cpufreq_frequency_table_verify(policy
, exynos4_freq_table
);
205 static unsigned int exynos4_getspeed(unsigned int cpu
)
207 return clk_get_rate(cpu_clk
) / 1000;
210 static void exynos4_set_clkdiv(unsigned int div_index
)
214 /* Change Divider - CPU0 */
216 tmp
= __raw_readl(S5P_CLKDIV_CPU
);
218 tmp
&= ~(S5P_CLKDIV_CPU0_CORE_MASK
| S5P_CLKDIV_CPU0_COREM0_MASK
|
219 S5P_CLKDIV_CPU0_COREM1_MASK
| S5P_CLKDIV_CPU0_PERIPH_MASK
|
220 S5P_CLKDIV_CPU0_ATB_MASK
| S5P_CLKDIV_CPU0_PCLKDBG_MASK
|
221 S5P_CLKDIV_CPU0_APLL_MASK
);
223 tmp
|= ((clkdiv_cpu0
[div_index
][0] << S5P_CLKDIV_CPU0_CORE_SHIFT
) |
224 (clkdiv_cpu0
[div_index
][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT
) |
225 (clkdiv_cpu0
[div_index
][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT
) |
226 (clkdiv_cpu0
[div_index
][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT
) |
227 (clkdiv_cpu0
[div_index
][4] << S5P_CLKDIV_CPU0_ATB_SHIFT
) |
228 (clkdiv_cpu0
[div_index
][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT
) |
229 (clkdiv_cpu0
[div_index
][6] << S5P_CLKDIV_CPU0_APLL_SHIFT
));
231 __raw_writel(tmp
, S5P_CLKDIV_CPU
);
234 tmp
= __raw_readl(S5P_CLKDIV_STATCPU
);
235 } while (tmp
& 0x1111111);
237 /* Change Divider - CPU1 */
239 tmp
= __raw_readl(S5P_CLKDIV_CPU1
);
241 tmp
&= ~((0x7 << 4) | 0x7);
243 tmp
|= ((clkdiv_cpu1
[div_index
][0] << 4) |
244 (clkdiv_cpu1
[div_index
][1] << 0));
246 __raw_writel(tmp
, S5P_CLKDIV_CPU1
);
249 tmp
= __raw_readl(S5P_CLKDIV_STATCPU1
);
250 } while (tmp
& 0x11);
252 /* Change Divider - DMC0 */
254 tmp
= __raw_readl(S5P_CLKDIV_DMC0
);
256 tmp
&= ~(S5P_CLKDIV_DMC0_ACP_MASK
| S5P_CLKDIV_DMC0_ACPPCLK_MASK
|
257 S5P_CLKDIV_DMC0_DPHY_MASK
| S5P_CLKDIV_DMC0_DMC_MASK
|
258 S5P_CLKDIV_DMC0_DMCD_MASK
| S5P_CLKDIV_DMC0_DMCP_MASK
|
259 S5P_CLKDIV_DMC0_COPY2_MASK
| S5P_CLKDIV_DMC0_CORETI_MASK
);
261 tmp
|= ((clkdiv_dmc0
[div_index
][0] << S5P_CLKDIV_DMC0_ACP_SHIFT
) |
262 (clkdiv_dmc0
[div_index
][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT
) |
263 (clkdiv_dmc0
[div_index
][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT
) |
264 (clkdiv_dmc0
[div_index
][3] << S5P_CLKDIV_DMC0_DMC_SHIFT
) |
265 (clkdiv_dmc0
[div_index
][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT
) |
266 (clkdiv_dmc0
[div_index
][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT
) |
267 (clkdiv_dmc0
[div_index
][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT
) |
268 (clkdiv_dmc0
[div_index
][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT
));
270 __raw_writel(tmp
, S5P_CLKDIV_DMC0
);
273 tmp
= __raw_readl(S5P_CLKDIV_STAT_DMC0
);
274 } while (tmp
& 0x11111111);
276 /* Change Divider - TOP */
278 tmp
= __raw_readl(S5P_CLKDIV_TOP
);
280 tmp
&= ~(S5P_CLKDIV_TOP_ACLK200_MASK
| S5P_CLKDIV_TOP_ACLK100_MASK
|
281 S5P_CLKDIV_TOP_ACLK160_MASK
| S5P_CLKDIV_TOP_ACLK133_MASK
|
282 S5P_CLKDIV_TOP_ONENAND_MASK
);
284 tmp
|= ((clkdiv_top
[div_index
][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT
) |
285 (clkdiv_top
[div_index
][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT
) |
286 (clkdiv_top
[div_index
][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT
) |
287 (clkdiv_top
[div_index
][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT
) |
288 (clkdiv_top
[div_index
][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT
));
290 __raw_writel(tmp
, S5P_CLKDIV_TOP
);
293 tmp
= __raw_readl(S5P_CLKDIV_STAT_TOP
);
294 } while (tmp
& 0x11111);
296 /* Change Divider - LEFTBUS */
298 tmp
= __raw_readl(S5P_CLKDIV_LEFTBUS
);
300 tmp
&= ~(S5P_CLKDIV_BUS_GDLR_MASK
| S5P_CLKDIV_BUS_GPLR_MASK
);
302 tmp
|= ((clkdiv_lr_bus
[div_index
][0] << S5P_CLKDIV_BUS_GDLR_SHIFT
) |
303 (clkdiv_lr_bus
[div_index
][1] << S5P_CLKDIV_BUS_GPLR_SHIFT
));
305 __raw_writel(tmp
, S5P_CLKDIV_LEFTBUS
);
308 tmp
= __raw_readl(S5P_CLKDIV_STAT_LEFTBUS
);
309 } while (tmp
& 0x11);
311 /* Change Divider - RIGHTBUS */
313 tmp
= __raw_readl(S5P_CLKDIV_RIGHTBUS
);
315 tmp
&= ~(S5P_CLKDIV_BUS_GDLR_MASK
| S5P_CLKDIV_BUS_GPLR_MASK
);
317 tmp
|= ((clkdiv_lr_bus
[div_index
][0] << S5P_CLKDIV_BUS_GDLR_SHIFT
) |
318 (clkdiv_lr_bus
[div_index
][1] << S5P_CLKDIV_BUS_GPLR_SHIFT
));
320 __raw_writel(tmp
, S5P_CLKDIV_RIGHTBUS
);
323 tmp
= __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS
);
324 } while (tmp
& 0x11);
327 static void exynos4_set_apll(unsigned int index
)
331 /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
332 clk_set_parent(moutcore
, mout_mpll
);
335 tmp
= (__raw_readl(S5P_CLKMUX_STATCPU
)
336 >> S5P_CLKSRC_CPU_MUXCORE_SHIFT
);
338 } while (tmp
!= 0x2);
340 /* 2. Set APLL Lock time */
341 __raw_writel(S5P_APLL_LOCKTIME
, S5P_APLL_LOCK
);
343 /* 3. Change PLL PMS values */
344 tmp
= __raw_readl(S5P_APLL_CON0
);
345 tmp
&= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
346 tmp
|= exynos4_apll_pms_table
[index
];
347 __raw_writel(tmp
, S5P_APLL_CON0
);
349 /* 4. wait_lock_time */
351 tmp
= __raw_readl(S5P_APLL_CON0
);
352 } while (!(tmp
& (0x1 << S5P_APLLCON0_LOCKED_SHIFT
)));
354 /* 5. MUX_CORE_SEL = APLL */
355 clk_set_parent(moutcore
, mout_apll
);
358 tmp
= __raw_readl(S5P_CLKMUX_STATCPU
);
359 tmp
&= S5P_CLKMUX_STATCPU_MUXCORE_MASK
;
360 } while (tmp
!= (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT
));
363 static void exynos4_set_frequency(unsigned int old_index
, unsigned int new_index
)
367 if (old_index
> new_index
) {
368 /* The frequency changing to L0 needs to change apll */
369 if (freqs
.new == exynos4_freq_table
[L0
].frequency
) {
370 /* 1. Change the system clock divider values */
371 exynos4_set_clkdiv(new_index
);
373 /* 2. Change the apll m,p,s value */
374 exynos4_set_apll(new_index
);
376 /* 1. Change the system clock divider values */
377 exynos4_set_clkdiv(new_index
);
379 /* 2. Change just s value in apll m,p,s value */
380 tmp
= __raw_readl(S5P_APLL_CON0
);
382 tmp
|= (exynos4_apll_pms_table
[new_index
] & 0x7);
383 __raw_writel(tmp
, S5P_APLL_CON0
);
387 else if (old_index
< new_index
) {
388 /* The frequency changing from L0 needs to change apll */
389 if (freqs
.old
== exynos4_freq_table
[L0
].frequency
) {
390 /* 1. Change the apll m,p,s value */
391 exynos4_set_apll(new_index
);
393 /* 2. Change the system clock divider values */
394 exynos4_set_clkdiv(new_index
);
396 /* 1. Change just s value in apll m,p,s value */
397 tmp
= __raw_readl(S5P_APLL_CON0
);
399 tmp
|= (exynos4_apll_pms_table
[new_index
] & 0x7);
400 __raw_writel(tmp
, S5P_APLL_CON0
);
402 /* 2. Change the system clock divider values */
403 exynos4_set_clkdiv(new_index
);
/*
 * cpufreq ->target hook: move this policy to the table entry matching
 * @target_freq under @relation, adjusting the ARM and INT regulators
 * around the clock change (voltage raised before speeding up, lowered
 * only after slowing down).
 *
 * NOTE(review): several lines appear to be missing from this view (local
 * declarations, error-path gotos, the unlock/return tail). Also the
 * regulator_set_voltage() return values are not checked — confirm against
 * the full file.
 */
static int exynos4_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
	unsigned int index, old_index;
	unsigned int arm_volt, int_volt;

	freqs.old = exynos4_getspeed(policy->cpu);

	mutex_lock(&cpufreq_lock);

	/* while suspending, only the boot (locking) frequency is accepted */
	if (frequency_locked && target_freq != locking_frequency) {

	/* map current and requested frequencies to table indices */
	if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
					   freqs.old, relation, &old_index))

	if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
					   target_freq, relation, &index))

	freqs.new = exynos4_freq_table[index].frequency;
	freqs.cpu = policy->cpu;

	/* nothing to do when already at the requested frequency */
	if (freqs.new == freqs.old)

	/* get the voltage value */
	arm_volt = exynos4_volt_table[index].arm_volt;
	int_volt = exynos4_volt_table[index].int_volt;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* control regulator */
	if (freqs.new > freqs.old) {
		/* Voltage up: raise the rails before raising the clock */
		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
		regulator_set_voltage(int_regulator, int_volt, int_volt);

	/* Clock Configuration Procedure */
	exynos4_set_frequency(old_index, index);

	/* control regulator */
	if (freqs.new < freqs.old) {
		/* Voltage down: lower the rails only after the clock dropped */
		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
		regulator_set_voltage(int_regulator, int_volt, int_volt);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	mutex_unlock(&cpufreq_lock);
/*
 * These suspend/resume are used as syscore_ops, it is already too
 * late to set regulator voltages at this stage.
 */
/* cpufreq ->suspend hook (body not visible in this view). */
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)

/* cpufreq ->resume hook (body not visible in this view). */
static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
/*
 * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
 * @notifier: unused.
 * @pm_event: PM_SUSPEND_PREPARE or PM_POST_SUSPEND.
 * @v: unused.
 *
 * While frequency_locked == true, target() ignores every frequency but
 * locking_frequency. The locking_frequency value is the initial frequency,
 * which is set by the bootloader. In order to eliminate possible
 * inconsistency in clock values, we save and restore frequencies during
 * suspend and resume and block CPUFREQ activities. Note that the standard
 * suspend/resume cannot be used as they are too deep (syscore_ops) for
 * regulator actions.
 *
 * NOTE(review): the switch (pm_event) statement, break statements, the
 * "temp" local and the final return are not visible in this view.
 */
static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
				       unsigned long pm_event, void *v)
	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
	static unsigned int saved_frequency;

	mutex_lock(&cpufreq_lock);
	case PM_SUSPEND_PREPARE:
		if (frequency_locked)
		frequency_locked = true;

		if (locking_frequency) {
			saved_frequency = exynos4_getspeed(0);

			/* drop the lock across target(): it re-acquires it */
			mutex_unlock(&cpufreq_lock);
			exynos4_target(policy, locking_frequency,
			mutex_lock(&cpufreq_lock);

	case PM_POST_SUSPEND:
		if (saved_frequency) {
			/*
			 * While frequency_locked, only locking_frequency
			 * is valid for target(). In order to use
			 * saved_frequency while keeping frequency_locked,
			 * we temporarily overwrite locking_frequency.
			 */
			temp = locking_frequency;
			locking_frequency = saved_frequency;

			mutex_unlock(&cpufreq_lock);
			exynos4_target(policy, locking_frequency,
			mutex_lock(&cpufreq_lock);

			locking_frequency = temp;

		frequency_locked = false;

	mutex_unlock(&cpufreq_lock);
/* PM notifier registered at init to gate DVFS around suspend/resume. */
static struct notifier_block exynos4_cpufreq_nb = {
	.notifier_call = exynos4_cpufreq_pm_notifier,
/*
 * cpufreq ->init hook: seed the policy limits with the current rate,
 * publish the frequency table, and couple all CPUs into one policy.
 *
 * NOTE(review): cpufreq_frequency_table_get_attr() is called twice in this
 * function; the second registration looks redundant — confirm intent.
 * The "ret" declaration and the return statements are not visible here.
 */
static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
	policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);

	cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);

	/* set the transition latency value */
	policy->cpuinfo.transition_latency = 100000;

	/*
	 * EXYNOS4 multi-core processors has 2 cores
	 * that the frequency cannot be set independently.
	 * Each cpu is bound to the same speed.
	 * So the affected cpu is all of the cpus.
	 */
	cpumask_setall(policy->cpus);

	ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);

	cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
/* cpufreq ->exit hook: drop the sysfs frequency-table attribute. */
static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	cpufreq_frequency_table_put_attr(policy->cpu);
/* sysfs attributes exported by the driver (scaling_available_frequencies). */
static struct freq_attr *exynos4_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
/* cpufreq driver description registered from exynos4_cpufreq_init(). */
static struct cpufreq_driver exynos4_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= exynos4_verify_speed,
	.target		= exynos4_target,
	.get		= exynos4_getspeed,
	.init		= exynos4_cpufreq_cpu_init,
	.exit		= exynos4_cpufreq_cpu_exit,
	.name		= "exynos4_cpufreq",
	.attr		= exynos4_cpufreq_attr,
	.suspend	= exynos4_cpufreq_suspend,
	.resume		= exynos4_cpufreq_resume,
613 static int __init
exynos4_cpufreq_init(void)
615 cpu_clk
= clk_get(NULL
, "armclk");
617 return PTR_ERR(cpu_clk
);
619 locking_frequency
= exynos4_getspeed(0);
621 moutcore
= clk_get(NULL
, "moutcore");
622 if (IS_ERR(moutcore
))
625 mout_mpll
= clk_get(NULL
, "mout_mpll");
626 if (IS_ERR(mout_mpll
))
629 mout_apll
= clk_get(NULL
, "mout_apll");
630 if (IS_ERR(mout_apll
))
633 arm_regulator
= regulator_get(NULL
, "vdd_arm");
634 if (IS_ERR(arm_regulator
)) {
635 printk(KERN_ERR
"failed to get resource %s\n", "vdd_arm");
639 int_regulator
= regulator_get(NULL
, "vdd_int");
640 if (IS_ERR(int_regulator
)) {
641 printk(KERN_ERR
"failed to get resource %s\n", "vdd_int");
647 * Because DVFS level is different according to DRAM type.
649 memtype
= __raw_readl(S5P_VA_DMC0
+ S5P_DMC0_MEMCON_OFFSET
);
650 memtype
= (memtype
>> S5P_DMC0_MEMTYPE_SHIFT
);
651 memtype
&= S5P_DMC0_MEMTYPE_MASK
;
653 if ((memtype
< DDR2
) && (memtype
> DDR3
)) {
654 printk(KERN_ERR
"%s: wrong memtype= 0x%x\n", __func__
, memtype
);
657 printk(KERN_DEBUG
"%s: memtype= 0x%x\n", __func__
, memtype
);
660 register_pm_notifier(&exynos4_cpufreq_nb
);
662 return cpufreq_register_driver(&exynos4_driver
);
665 if (!IS_ERR(cpu_clk
))
668 if (!IS_ERR(moutcore
))
671 if (!IS_ERR(mout_mpll
))
674 if (!IS_ERR(mout_apll
))
677 if (!IS_ERR(arm_regulator
))
678 regulator_put(arm_regulator
);
680 if (!IS_ERR(int_regulator
))
681 regulator_put(int_regulator
);
683 printk(KERN_ERR
"%s: failed initialization\n", __func__
);
687 late_initcall(exynos4_cpufreq_init
);