/* drivers/cpufreq/exynos4210-cpufreq.c */
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/regs-mem.h>

#include <plat/clock.h>
#include <plat/pm.h>
30 static struct clk *cpu_clk;
31 static struct clk *moutcore;
32 static struct clk *mout_mpll;
33 static struct clk *mout_apll;
35 static struct regulator *arm_regulator;
36 static struct regulator *int_regulator;
38 static struct cpufreq_freqs freqs;
39 static unsigned int memtype;
41 static unsigned int locking_frequency;
42 static bool frequency_locked;
43 static DEFINE_MUTEX(cpufreq_lock);
/* DRAM type encoding as read from the DMC MEMCON register field. */
enum exynos4_memory_type {
	DDR2 = 4,
	LPDDR2,
	DDR3,
};
/* Operating-point indices; CPUFREQ_LEVEL_END doubles as the table size. */
enum cpufreq_level_index {
	L0, L1, L2, L3, CPUFREQ_LEVEL_END,
};
55 static struct cpufreq_frequency_table exynos4_freq_table[] = {
56 {L0, 1000*1000},
57 {L1, 800*1000},
58 {L2, 400*1000},
59 {L3, 100*1000},
60 {0, CPUFREQ_TABLE_END},
63 static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
65 * Clock divider value for following
66 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
67 * DIVATB, DIVPCLK_DBG, DIVAPLL }
70 /* ARM L0: 1000MHz */
71 { 0, 3, 7, 3, 3, 0, 1 },
73 /* ARM L1: 800MHz */
74 { 0, 3, 7, 3, 3, 0, 1 },
76 /* ARM L2: 400MHz */
77 { 0, 1, 3, 1, 3, 0, 1 },
79 /* ARM L3: 100MHz */
80 { 0, 0, 1, 0, 3, 1, 1 },
83 static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
85 * Clock divider value for following
86 * { DIVCOPY, DIVHPM }
89 /* ARM L0: 1000MHz */
90 { 3, 0 },
92 /* ARM L1: 800MHz */
93 { 3, 0 },
95 /* ARM L2: 400MHz */
96 { 3, 0 },
98 /* ARM L3: 100MHz */
99 { 3, 0 },
102 static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
104 * Clock divider value for following
105 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
106 * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
109 /* DMC L0: 400MHz */
110 { 3, 1, 1, 1, 1, 1, 3, 1 },
112 /* DMC L1: 400MHz */
113 { 3, 1, 1, 1, 1, 1, 3, 1 },
115 /* DMC L2: 266.7MHz */
116 { 7, 1, 1, 2, 1, 1, 3, 1 },
118 /* DMC L3: 200MHz */
119 { 7, 1, 1, 3, 1, 1, 3, 1 },
122 static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
124 * Clock divider value for following
125 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
128 /* ACLK200 L0: 200MHz */
129 { 3, 7, 4, 5, 1 },
131 /* ACLK200 L1: 200MHz */
132 { 3, 7, 4, 5, 1 },
134 /* ACLK200 L2: 160MHz */
135 { 4, 7, 5, 7, 1 },
137 /* ACLK200 L3: 133.3MHz */
138 { 5, 7, 7, 7, 1 },
141 static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
143 * Clock divider value for following
144 * { DIVGDL/R, DIVGPL/R }
147 /* ACLK_GDL/R L0: 200MHz */
148 { 3, 1 },
150 /* ACLK_GDL/R L1: 200MHz */
151 { 3, 1 },
153 /* ACLK_GDL/R L2: 160MHz */
154 { 4, 1 },
156 /* ACLK_GDL/R L3: 133.3MHz */
157 { 5, 1 },
/* Per-level voltage pair applied around a frequency transition. */
struct cpufreq_voltage_table {
	unsigned int index;	/* any */
	unsigned int arm_volt;	/* uV */
	unsigned int int_volt;	/* uV */
};
166 static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
168 .index = L0,
169 .arm_volt = 1200000,
170 .int_volt = 1100000,
171 }, {
172 .index = L1,
173 .arm_volt = 1100000,
174 .int_volt = 1100000,
175 }, {
176 .index = L2,
177 .arm_volt = 1000000,
178 .int_volt = 1000000,
179 }, {
180 .index = L3,
181 .arm_volt = 900000,
182 .int_volt = 1000000,
186 static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
187 /* APLL FOUT L0: 1000MHz */
188 ((250 << 16) | (6 << 8) | 1),
190 /* APLL FOUT L1: 800MHz */
191 ((200 << 16) | (6 << 8) | 1),
193 /* APLL FOUT L2 : 400MHz */
194 ((200 << 16) | (6 << 8) | 2),
196 /* APLL FOUT L3: 100MHz */
197 ((200 << 16) | (6 << 8) | 4),
200 static int exynos4_verify_speed(struct cpufreq_policy *policy)
202 return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
205 static unsigned int exynos4_getspeed(unsigned int cpu)
207 return clk_get_rate(cpu_clk) / 1000;
210 static void exynos4_set_clkdiv(unsigned int div_index)
212 unsigned int tmp;
214 /* Change Divider - CPU0 */
216 tmp = __raw_readl(S5P_CLKDIV_CPU);
218 tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
219 S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
220 S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
221 S5P_CLKDIV_CPU0_APLL_MASK);
223 tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
224 (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
225 (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
226 (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
227 (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
228 (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
229 (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
231 __raw_writel(tmp, S5P_CLKDIV_CPU);
233 do {
234 tmp = __raw_readl(S5P_CLKDIV_STATCPU);
235 } while (tmp & 0x1111111);
237 /* Change Divider - CPU1 */
239 tmp = __raw_readl(S5P_CLKDIV_CPU1);
241 tmp &= ~((0x7 << 4) | 0x7);
243 tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
244 (clkdiv_cpu1[div_index][1] << 0));
246 __raw_writel(tmp, S5P_CLKDIV_CPU1);
248 do {
249 tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
250 } while (tmp & 0x11);
252 /* Change Divider - DMC0 */
254 tmp = __raw_readl(S5P_CLKDIV_DMC0);
256 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
257 S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
258 S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
259 S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
261 tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
262 (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
263 (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
264 (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
265 (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
266 (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
267 (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
268 (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
270 __raw_writel(tmp, S5P_CLKDIV_DMC0);
272 do {
273 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
274 } while (tmp & 0x11111111);
276 /* Change Divider - TOP */
278 tmp = __raw_readl(S5P_CLKDIV_TOP);
280 tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
281 S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
282 S5P_CLKDIV_TOP_ONENAND_MASK);
284 tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
285 (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
286 (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
287 (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
288 (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
290 __raw_writel(tmp, S5P_CLKDIV_TOP);
292 do {
293 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
294 } while (tmp & 0x11111);
296 /* Change Divider - LEFTBUS */
298 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
300 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
302 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
303 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
305 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
307 do {
308 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
309 } while (tmp & 0x11);
311 /* Change Divider - RIGHTBUS */
313 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
315 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
317 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
318 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
320 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
322 do {
323 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
324 } while (tmp & 0x11);
327 static void exynos4_set_apll(unsigned int index)
329 unsigned int tmp;
331 /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
332 clk_set_parent(moutcore, mout_mpll);
334 do {
335 tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
336 >> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
337 tmp &= 0x7;
338 } while (tmp != 0x2);
340 /* 2. Set APLL Lock time */
341 __raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
343 /* 3. Change PLL PMS values */
344 tmp = __raw_readl(S5P_APLL_CON0);
345 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
346 tmp |= exynos4_apll_pms_table[index];
347 __raw_writel(tmp, S5P_APLL_CON0);
349 /* 4. wait_lock_time */
350 do {
351 tmp = __raw_readl(S5P_APLL_CON0);
352 } while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
354 /* 5. MUX_CORE_SEL = APLL */
355 clk_set_parent(moutcore, mout_apll);
357 do {
358 tmp = __raw_readl(S5P_CLKMUX_STATCPU);
359 tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
360 } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
363 static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
365 unsigned int tmp;
367 if (old_index > new_index) {
368 /* The frequency changing to L0 needs to change apll */
369 if (freqs.new == exynos4_freq_table[L0].frequency) {
370 /* 1. Change the system clock divider values */
371 exynos4_set_clkdiv(new_index);
373 /* 2. Change the apll m,p,s value */
374 exynos4_set_apll(new_index);
375 } else {
376 /* 1. Change the system clock divider values */
377 exynos4_set_clkdiv(new_index);
379 /* 2. Change just s value in apll m,p,s value */
380 tmp = __raw_readl(S5P_APLL_CON0);
381 tmp &= ~(0x7 << 0);
382 tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
383 __raw_writel(tmp, S5P_APLL_CON0);
387 else if (old_index < new_index) {
388 /* The frequency changing from L0 needs to change apll */
389 if (freqs.old == exynos4_freq_table[L0].frequency) {
390 /* 1. Change the apll m,p,s value */
391 exynos4_set_apll(new_index);
393 /* 2. Change the system clock divider values */
394 exynos4_set_clkdiv(new_index);
395 } else {
396 /* 1. Change just s value in apll m,p,s value */
397 tmp = __raw_readl(S5P_APLL_CON0);
398 tmp &= ~(0x7 << 0);
399 tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
400 __raw_writel(tmp, S5P_APLL_CON0);
402 /* 2. Change the system clock divider values */
403 exynos4_set_clkdiv(new_index);
408 static int exynos4_target(struct cpufreq_policy *policy,
409 unsigned int target_freq,
410 unsigned int relation)
412 unsigned int index, old_index;
413 unsigned int arm_volt, int_volt;
414 int err = -EINVAL;
416 freqs.old = exynos4_getspeed(policy->cpu);
418 mutex_lock(&cpufreq_lock);
420 if (frequency_locked && target_freq != locking_frequency) {
421 err = -EAGAIN;
422 goto out;
425 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
426 freqs.old, relation, &old_index))
427 goto out;
429 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
430 target_freq, relation, &index))
431 goto out;
433 err = 0;
435 freqs.new = exynos4_freq_table[index].frequency;
436 freqs.cpu = policy->cpu;
438 if (freqs.new == freqs.old)
439 goto out;
441 /* get the voltage value */
442 arm_volt = exynos4_volt_table[index].arm_volt;
443 int_volt = exynos4_volt_table[index].int_volt;
445 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
447 /* control regulator */
448 if (freqs.new > freqs.old) {
449 /* Voltage up */
450 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
451 regulator_set_voltage(int_regulator, int_volt, int_volt);
454 /* Clock Configuration Procedure */
455 exynos4_set_frequency(old_index, index);
457 /* control regulator */
458 if (freqs.new < freqs.old) {
459 /* Voltage down */
460 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
461 regulator_set_voltage(int_regulator, int_volt, int_volt);
464 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
466 out:
467 mutex_unlock(&cpufreq_lock);
468 return err;
#ifdef CONFIG_PM
/*
 * These suspend/resume are used as syscore_ops, it is already too
 * late to set regulator voltages at this stage. The real save/restore
 * work is done by the PM notifier below, so these are intentional no-ops.
 */
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
{
	return 0;
}

static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
{
	return 0;
}
#endif
488 * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
489 * context
490 * @notifier
491 * @pm_event
492 * @v
494 * While frequency_locked == true, target() ignores every frequency but
495 * locking_frequency. The locking_frequency value is the initial frequency,
496 * which is set by the bootloader. In order to eliminate possible
497 * inconsistency in clock values, we save and restore frequencies during
498 * suspend and resume and block CPUFREQ activities. Note that the standard
499 * suspend/resume cannot be used as they are too deep (syscore_ops) for
500 * regulator actions.
502 static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
503 unsigned long pm_event, void *v)
505 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
506 static unsigned int saved_frequency;
507 unsigned int temp;
509 mutex_lock(&cpufreq_lock);
510 switch (pm_event) {
511 case PM_SUSPEND_PREPARE:
512 if (frequency_locked)
513 goto out;
514 frequency_locked = true;
516 if (locking_frequency) {
517 saved_frequency = exynos4_getspeed(0);
519 mutex_unlock(&cpufreq_lock);
520 exynos4_target(policy, locking_frequency,
521 CPUFREQ_RELATION_H);
522 mutex_lock(&cpufreq_lock);
525 break;
526 case PM_POST_SUSPEND:
528 if (saved_frequency) {
530 * While frequency_locked, only locking_frequency
531 * is valid for target(). In order to use
532 * saved_frequency while keeping frequency_locked,
533 * we temporarly overwrite locking_frequency.
535 temp = locking_frequency;
536 locking_frequency = saved_frequency;
538 mutex_unlock(&cpufreq_lock);
539 exynos4_target(policy, locking_frequency,
540 CPUFREQ_RELATION_H);
541 mutex_lock(&cpufreq_lock);
543 locking_frequency = temp;
546 frequency_locked = false;
547 break;
549 out:
550 mutex_unlock(&cpufreq_lock);
552 return NOTIFY_OK;
555 static struct notifier_block exynos4_cpufreq_nb = {
556 .notifier_call = exynos4_cpufreq_pm_notifier,
559 static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
561 int ret;
563 policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
565 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
567 /* set the transition latency value */
568 policy->cpuinfo.transition_latency = 100000;
571 * EXYNOS4 multi-core processors has 2 cores
572 * that the frequency cannot be set independently.
573 * Each cpu is bound to the same speed.
574 * So the affected cpu is all of the cpus.
576 cpumask_setall(policy->cpus);
578 ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
579 if (ret)
580 return ret;
582 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
584 return 0;
587 static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
589 cpufreq_frequency_table_put_attr(policy->cpu);
590 return 0;
593 static struct freq_attr *exynos4_cpufreq_attr[] = {
594 &cpufreq_freq_attr_scaling_available_freqs,
595 NULL,
598 static struct cpufreq_driver exynos4_driver = {
599 .flags = CPUFREQ_STICKY,
600 .verify = exynos4_verify_speed,
601 .target = exynos4_target,
602 .get = exynos4_getspeed,
603 .init = exynos4_cpufreq_cpu_init,
604 .exit = exynos4_cpufreq_cpu_exit,
605 .name = "exynos4_cpufreq",
606 .attr = exynos4_cpufreq_attr,
607 #ifdef CONFIG_PM
608 .suspend = exynos4_cpufreq_suspend,
609 .resume = exynos4_cpufreq_resume,
610 #endif
613 static int __init exynos4_cpufreq_init(void)
615 cpu_clk = clk_get(NULL, "armclk");
616 if (IS_ERR(cpu_clk))
617 return PTR_ERR(cpu_clk);
619 locking_frequency = exynos4_getspeed(0);
621 moutcore = clk_get(NULL, "moutcore");
622 if (IS_ERR(moutcore))
623 goto out;
625 mout_mpll = clk_get(NULL, "mout_mpll");
626 if (IS_ERR(mout_mpll))
627 goto out;
629 mout_apll = clk_get(NULL, "mout_apll");
630 if (IS_ERR(mout_apll))
631 goto out;
633 arm_regulator = regulator_get(NULL, "vdd_arm");
634 if (IS_ERR(arm_regulator)) {
635 printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
636 goto out;
639 int_regulator = regulator_get(NULL, "vdd_int");
640 if (IS_ERR(int_regulator)) {
641 printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
642 goto out;
646 * Check DRAM type.
647 * Because DVFS level is different according to DRAM type.
649 memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
650 memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
651 memtype &= S5P_DMC0_MEMTYPE_MASK;
653 if ((memtype < DDR2) && (memtype > DDR3)) {
654 printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
655 goto out;
656 } else {
657 printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
660 register_pm_notifier(&exynos4_cpufreq_nb);
662 return cpufreq_register_driver(&exynos4_driver);
664 out:
665 if (!IS_ERR(cpu_clk))
666 clk_put(cpu_clk);
668 if (!IS_ERR(moutcore))
669 clk_put(moutcore);
671 if (!IS_ERR(mout_mpll))
672 clk_put(mout_mpll);
674 if (!IS_ERR(mout_apll))
675 clk_put(mout_apll);
677 if (!IS_ERR(arm_regulator))
678 regulator_put(arm_regulator);
680 if (!IS_ERR(int_regulator))
681 regulator_put(int_regulator);
683 printk(KERN_ERR "%s: failed initialization\n", __func__);
685 return -EINVAL;
687 late_initcall(exynos4_cpufreq_init);