ENGR00221277 MX6DL/S - Set AXI clock to 270MHz
[wandboard.git] / arch/arm/mach-mx6/clock.c
blob 93b60ac679e31873de3e2801da6ce2c080aee8f2
2 /*
3 * Copyright (C) 2012 Freescale Semiconductor, Inc. All Rights Reserved.
4 */
6 /*
7 * The code contained herein is licensed under the GNU General Public
8 * License. You may obtain a copy of the GNU General Public License
9 * Version 2 or later at the following locations:
11 * http://www.opensource.org/licenses/gpl-license.html
12 * http://www.gnu.org/copyleft/gpl.html
13 */
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/types.h>
18 #include <linux/time.h>
19 #include <linux/hrtimer.h>
20 #include <linux/mm.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
24 #include <linux/io.h>
25 #include <linux/clkdev.h>
26 #include <linux/regulator/consumer.h>
27 #include <asm/div64.h>
28 #include <mach/hardware.h>
29 #include <mach/common.h>
30 #include <mach/clock.h>
31 #include <mach/mxc_dvfs.h>
32 #include <mach/ahci_sata.h>
33 #include <mach/mxc_hdmi.h>
34 #include "crm_regs.h"
35 #include "cpu_op-mx6.h"
36 #include "regs-anadig.h"
38 #ifdef CONFIG_CLK_DEBUG
39 #define __INIT_CLK_DEBUG(n) .name = #n,
40 #else
41 #define __INIT_CLK_DEBUG(n)
42 #endif
44 extern u32 arm_max_freq;
45 extern int mxc_jtag_enabled;
46 extern struct regulator *cpu_regulator;
47 extern struct cpu_op *(*get_cpu_op)(int *op);
48 extern int lp_high_freq;
49 extern int lp_med_freq;
50 extern int wait_mode_arm_podf;
51 extern int lp_audio_freq;
52 extern int cur_arm_podf;
54 void __iomem *apll_base;
56 static void __iomem *timer_base;
57 static struct clk ipu1_clk;
58 static struct clk ipu2_clk;
59 static struct clk axi_clk;
60 static struct clk pll1_sys_main_clk;
61 static struct clk pll2_528_bus_main_clk;
62 static struct clk pll2_pfd_400M;
63 static struct clk pll2_pfd_352M;
64 static struct clk pll3_pfd_540M;
65 static struct clk pll2_pfd_594M;
66 static struct clk pll3_usb_otg_main_clk;
67 static struct clk pll4_audio_main_clk;
68 static struct clk pll5_video_main_clk;
69 static struct clk pll6_mlb150_main_clk;
70 static struct clk pll7_usb_host_main_clk;
71 static struct clk pll8_enet_main_clk;
72 static struct clk apbh_dma_clk;
73 static struct clk openvg_axi_clk;
74 static struct clk enfc_clk;
75 static struct clk usdhc3_clk;
76 static struct clk ipg_clk;
77 static struct clk gpt_clk[];
78 static struct clk clko2_clk;
80 static struct cpu_op *cpu_op_tbl;
81 static int cpu_op_nr;
82 static bool pll1_enabled;
83 static bool arm_needs_pll2_400;
85 #define SPIN_DELAY 1200000 /* in nanoseconds */
87 #define AUDIO_VIDEO_MIN_CLK_FREQ 650000000
88 #define AUDIO_VIDEO_MAX_CLK_FREQ 1300000000
89 DEFINE_SPINLOCK(clk_lock);
90 #define V2_TCN 0x24
91 #define V2_TSTAT 0x08
92 #define V2_TSTAT_ROV (1 << 5)
93 #define V2_TCTL_CLK_OSC_DIV8 (5 << 6)
94 #define MXC_TCTL 0x00
95 #define MXC_TPRER 0x04
96 #define V2_TPRER_PRE24M_OFFSET 12
97 #define V2_TPRER_PRE24M_MASK 0xF
99 /* We need to check the exp status again after the timer expires:
100 * an interrupt may arrive between the first exp evaluation and the
101 * time reading, so the time reading can be several ms later than
102 * the exp check because of the IRQ handling. Re-checking makes
103 * sure exp reports the right value after the timer has
104 * expired. */
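/*
 * For example (assuming the GPT runs at 3MHz, i.e. the 24MHz
 * oscillator divided by 8), the 1.2ms SPIN_DELAY translates to
 * roughly 3600 GPT ticks before the WAIT() macro below gives up.
 */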
105 #define WAIT(exp, timeout) \
106 ({ \
107 u32 gpt_rate; \
108 u32 gpt_ticks; \
109 u32 gpt_cnt; \
110 u32 reg; \
111 int result = 1; \
112 gpt_rate = clk_get_rate(&gpt_clk[0]); \
113 gpt_ticks = timeout / (1000000000 / gpt_rate); \
114 reg = __raw_readl(timer_base + V2_TSTAT);\
115 /* Clear the GPT roll over interrupt. */ \
116 if (reg & V2_TSTAT_ROV) { \
117 reg |= V2_TSTAT_ROV;\
118 __raw_writel(reg, timer_base + V2_TSTAT);\
120 gpt_cnt = __raw_readl(timer_base + V2_TCN); \
121 while (!(exp)) { \
122 if ((__raw_readl(timer_base + V2_TCN) - gpt_cnt) > gpt_ticks) { \
123 if (!exp) \
124 result = 0; \
125 break; \
126 } else { \
127 reg = __raw_readl(timer_base + V2_TSTAT);\
128 if (reg & V2_TSTAT_ROV) { \
129 u32 old_cnt = gpt_cnt; \
130 /* Timer has rolled over. \
131 * Calculate the new tick count. \
132 */ \
133 gpt_cnt = __raw_readl(timer_base + V2_TCN); \
134 gpt_ticks -= (0xFFFFFFFF - old_cnt + gpt_cnt); \
135 /* Clear the roll over interrupt. */ \
136 reg |= V2_TSTAT_ROV;\
137 __raw_writel(reg, timer_base + V2_TSTAT);\
141 result; \
144 /* External clock values passed in by the board code */
145 static unsigned long external_high_reference, external_low_reference;
146 static unsigned long oscillator_reference, ckih2_reference;
147 static unsigned long anaclk_1_reference, anaclk_2_reference;
149 /* For MX6DL/S, the Video PLL may be used by synchronous display devices,
150 * such as HDMI or LVDS, and also by the EPDC. If the EPDC is in use,
151 * it must use the Video PLL to achieve the clock frequencies it needs,
152 * so the "epdc" string should be added to the kernel parameters to set
153 * the EPDC parent clock to the Video PLL. This will affect the behavior
154 * of HDMI and LVDS.
155 */
156 int epdc_enabled;
157 static int __init epdc_setup(char *__unused)
159 epdc_enabled = 1;
160 return 1;
162 __setup("epdc", epdc_setup);
164 static void __calc_pre_post_dividers(u32 max_podf, u32 div, u32 *pre, u32 *post)
166 u32 min_pre, temp_pre, old_err, err;
168 /* Some of the podfs are 3 bits while others are 6 bits.
169 * Handle both cases here.
171 if (div >= 512 && (max_podf == 64)) {
172 /* For pre = 3 bits and podf = 6 bits, max divider is 512. */
173 *pre = 8;
174 *post = 64;
175 } else if (div >= 64 && (max_podf == 8)) {
176 /* For pre = 3 bits and podf = 3 bits, max divider is 64. */
177 *pre = 8;
178 *post = 8;
179 } else if (div >= 8) {
180 /* Find the minimum pre-divider for a max podf */
181 if (max_podf == 64)
182 min_pre = (div - 1) / (1 << 6) + 1;
183 else
184 min_pre = (div - 1) / (1 << 3) + 1;
185 old_err = 8;
186 /* Now loop through to find the max pre-divider. */
187 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
188 err = div % temp_pre;
189 if (err == 0) {
190 *pre = temp_pre;
191 break;
193 err = temp_pre - err;
194 if (err < old_err) {
195 old_err = err;
196 *pre = temp_pre;
199 *post = (div + *pre - 1) / *pre;
200 } else if (div < 8) {
201 *pre = div;
202 *post = 1;
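/*
 * Worked example (illustrative): for div = 18 with max_podf = 64 the
 * loop above settles on *pre = 6 (since 18 % 6 == 0) and *post = 3,
 * an exact 6 * 3 = 18 divide. When no exact split exists, the pre
 * value with the smallest remainder is kept and *post is rounded up.
 */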
206 static int _clk_enable(struct clk *clk)
208 u32 reg;
209 reg = __raw_readl(clk->enable_reg);
210 reg |= MXC_CCM_CCGRx_CG_MASK << clk->enable_shift;
211 __raw_writel(reg, clk->enable_reg);
213 return 0;
216 static void _clk_disable(struct clk *clk)
218 u32 reg;
219 reg = __raw_readl(clk->enable_reg);
220 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
221 __raw_writel(reg, clk->enable_reg);
224 static void _clk_disable_inwait(struct clk *clk)
226 u32 reg;
227 reg = __raw_readl(clk->enable_reg);
228 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
229 reg |= 1 << clk->enable_shift;
230 __raw_writel(reg, clk->enable_reg);
235 * For the 4-to-1 muxed input clock
237 static inline u32 _get_mux(struct clk *parent, struct clk *m0,
238 struct clk *m1, struct clk *m2, struct clk *m3)
240 if (parent == m0)
241 return 0;
242 else if (parent == m1)
243 return 1;
244 else if (parent == m2)
245 return 2;
246 else if (parent == m3)
247 return 3;
248 else
249 BUG();
251 return 0;
254 static inline void __iomem *_get_pll_base(struct clk *pll)
256 if (pll == &pll1_sys_main_clk)
257 return PLL1_SYS_BASE_ADDR;
258 else if (pll == &pll2_528_bus_main_clk)
259 return PLL2_528_BASE_ADDR;
260 else if (pll == &pll3_usb_otg_main_clk)
261 return PLL3_480_USB1_BASE_ADDR;
262 else if (pll == &pll4_audio_main_clk)
263 return PLL4_AUDIO_BASE_ADDR;
264 else if (pll == &pll5_video_main_clk)
265 return PLL5_VIDEO_BASE_ADDR;
266 else if (pll == &pll6_mlb150_main_clk)
267 return PLL6_MLB_BASE_ADDR;
268 else if (pll == &pll7_usb_host_main_clk)
269 return PLL7_480_USB2_BASE_ADDR;
270 else if (pll == &pll8_enet_main_clk)
271 return PLL8_ENET_BASE_ADDR;
272 else
273 BUG();
274 return NULL;
279 * For the 6-to-1 muxed input clock
281 static inline u32 _get_mux6(struct clk *parent, struct clk *m0, struct clk *m1,
282 struct clk *m2, struct clk *m3, struct clk *m4,
283 struct clk *m5)
285 if (parent == m0)
286 return 0;
287 else if (parent == m1)
288 return 1;
289 else if (parent == m2)
290 return 2;
291 else if (parent == m3)
292 return 3;
293 else if (parent == m4)
294 return 4;
295 else if (parent == m5)
296 return 5;
297 else
298 BUG();
300 return 0;
302 static unsigned long get_high_reference_clock_rate(struct clk *clk)
304 return external_high_reference;
307 static unsigned long get_low_reference_clock_rate(struct clk *clk)
309 return external_low_reference;
312 static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
314 return oscillator_reference;
317 static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
319 return ckih2_reference;
322 static unsigned long _clk_anaclk_1_get_rate(struct clk *clk)
324 return anaclk_1_reference;
327 static int _clk_anaclk_1_set_rate(struct clk *clk, unsigned long rate)
329 anaclk_1_reference = rate;
330 return 0;
333 static unsigned long _clk_anaclk_2_get_rate(struct clk *clk)
335 return anaclk_2_reference;
338 static int _clk_anaclk_2_set_rate(struct clk *clk, unsigned long rate)
340 anaclk_2_reference = rate;
341 return 0;
344 /* External high frequency clock */
345 static struct clk ckih_clk = {
346 __INIT_CLK_DEBUG(ckih_clk)
347 .get_rate = get_high_reference_clock_rate,
350 static struct clk ckih2_clk = {
351 __INIT_CLK_DEBUG(ckih2_clk)
352 .get_rate = get_ckih2_reference_clock_rate,
355 static struct clk osc_clk = {
356 __INIT_CLK_DEBUG(osc_clk)
357 .get_rate = get_oscillator_reference_clock_rate,
360 /* External low frequency (32kHz) clock */
361 static struct clk ckil_clk = {
362 __INIT_CLK_DEBUG(ckil_clk)
363 .get_rate = get_low_reference_clock_rate,
366 static struct clk anaclk_1 = {
367 __INIT_CLK_DEBUG(anaclk_1)
368 .get_rate = _clk_anaclk_1_get_rate,
369 .set_rate = _clk_anaclk_1_set_rate,
372 static struct clk anaclk_2 = {
373 __INIT_CLK_DEBUG(anaclk_2)
374 .get_rate = _clk_anaclk_2_get_rate,
375 .set_rate = _clk_anaclk_2_set_rate,
378 static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
380 u32 frac;
381 u64 tmp;
383 tmp = (u64)clk_get_rate(clk->parent) * 18;
384 tmp += rate/2;
385 do_div(tmp, rate);
386 frac = tmp;
387 frac = frac < 12 ? 12 : frac;
388 frac = frac > 35 ? 35 : frac;
389 tmp = (u64)clk_get_rate(clk->parent) * 18;
390 do_div(tmp, frac);
391 return tmp;
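/*
 * The PFD output follows parent * 18 / frac with frac clamped to
 * [12, 35]. For example, assuming a 528MHz PLL2 parent, a requested
 * 400MHz rounds frac to 24, i.e. 528 * 18 / 24 = 396MHz, which is
 * the nominal "400M" PFD.
 */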
394 static unsigned long pfd_get_rate(struct clk *clk)
396 u32 frac;
397 u64 tmp;
398 tmp = (u64)clk_get_rate(clk->parent) * 18;
400 if (apbh_dma_clk.usecount == 0)
401 apbh_dma_clk.enable(&apbh_dma_clk);
403 frac = (__raw_readl(clk->enable_reg) >> clk->enable_shift) &
404 ANADIG_PFD_FRAC_MASK;
406 do_div(tmp, frac);
408 return tmp;
411 static int pfd_set_rate(struct clk *clk, unsigned long rate)
413 u32 frac;
414 u64 tmp;
415 tmp = (u64)clk_get_rate(clk->parent) * 18;
417 if (apbh_dma_clk.usecount == 0)
418 apbh_dma_clk.enable(&apbh_dma_clk);
420 /* Round up the divider so that we don't set a rate
421 * higher than what is requested. */
422 tmp += rate/2;
423 do_div(tmp, rate);
424 frac = tmp;
425 frac = frac < 12 ? 12 : frac;
426 frac = frac > 35 ? 35 : frac;
427 /* clear clk frac bits */
428 __raw_writel(ANADIG_PFD_FRAC_MASK << clk->enable_shift,
429 (int)clk->enable_reg + 8);
430 /* set clk frac bits */
431 __raw_writel(frac << clk->enable_shift,
432 (int)clk->enable_reg + 4);
434 if (apbh_dma_clk.usecount == 0)
435 apbh_dma_clk.disable(&apbh_dma_clk);
436 return 0;
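/*
 * The writes above rely on the anatop SET/CLR register convention:
 * each anadig register has write-one-to-set and write-one-to-clear
 * aliases at offsets +0x4 and +0x8, so the old FRAC value is cleared
 * through the CLR alias and the new value is programmed through SET
 * without a read-modify-write.
 */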
439 static int _clk_pfd_enable(struct clk *clk)
441 if (apbh_dma_clk.usecount == 0)
442 apbh_dma_clk.enable(&apbh_dma_clk);
444 /* clear clk gate bit */
445 __raw_writel((1 << (clk->enable_shift + 7)),
446 (int)clk->enable_reg + 8);
448 if (apbh_dma_clk.usecount == 0)
449 apbh_dma_clk.disable(&apbh_dma_clk);
451 return 0;
454 static void _clk_pfd_disable(struct clk *clk)
456 if (apbh_dma_clk.usecount == 0)
457 apbh_dma_clk.enable(&apbh_dma_clk);
459 /* set clk gate bit */
460 __raw_writel((1 << (clk->enable_shift + 7)),
461 (int)clk->enable_reg + 4);
463 if (apbh_dma_clk.usecount == 0)
464 apbh_dma_clk.disable(&apbh_dma_clk);
467 static int _clk_pll_enable(struct clk *clk)
469 unsigned int reg;
470 void __iomem *pllbase;
472 pllbase = _get_pll_base(clk);
474 reg = __raw_readl(pllbase);
475 reg &= ~ANADIG_PLL_BYPASS;
476 reg &= ~ANADIG_PLL_POWER_DOWN;
478 /* The 480MHz PLLs have the opposite definition for the power bit. */
479 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
480 reg |= ANADIG_PLL_POWER_DOWN;
482 __raw_writel(reg, pllbase);
484 /* This powers on PLL3. */
485 if (clk == &pll3_usb_otg_main_clk)
486 __raw_writel(BM_ANADIG_ANA_MISC2_CONTROL0, apll_base + HW_ANADIG_ANA_MISC2_CLR);
488 /* Wait for PLL to lock */
489 if (!WAIT((__raw_readl(pllbase) & ANADIG_PLL_LOCK),
490 SPIN_DELAY))
491 panic("pll enable failed\n");
493 /* Enable the PLL output now. */
494 reg = __raw_readl(pllbase);
495 reg |= ANADIG_PLL_ENABLE;
496 __raw_writel(reg, pllbase);
498 return 0;
501 static void _clk_pll_disable(struct clk *clk)
503 unsigned int reg;
504 void __iomem *pllbase;
506 if ((arm_needs_pll2_400) && (clk == &pll2_528_bus_main_clk))
507 return;
509 pllbase = _get_pll_base(clk);
511 reg = __raw_readl(pllbase);
512 reg |= ANADIG_PLL_BYPASS;
513 reg &= ~ANADIG_PLL_ENABLE;
515 __raw_writel(reg, pllbase);
518 * Power off PLL3; this is the TO1.1 fix.
519 * Please see TKT064178 for details.
521 if (clk == &pll3_usb_otg_main_clk)
522 __raw_writel(BM_ANADIG_ANA_MISC2_CONTROL0, apll_base + HW_ANADIG_ANA_MISC2_SET);
525 static unsigned long _clk_pll1_main_get_rate(struct clk *clk)
527 unsigned int div;
528 unsigned long val;
530 /* If PLL1 is bypassed, its rate will be from OSC directly */
531 if (__raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_BYPASS_MASK)
532 return clk_get_rate(clk->parent);
534 div = __raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
535 val = (clk_get_rate(clk->parent) * div) / 2;
536 return val;
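/*
 * PLL1 runs at osc * div_select / 2, so with the usual 24MHz
 * oscillator a div_select of 66, for instance, gives the 792MHz
 * setpoint.
 */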
539 static int _clk_pll1_main_set_rate(struct clk *clk, unsigned long rate)
541 unsigned int reg, div;
543 if (rate < AUDIO_VIDEO_MIN_CLK_FREQ || rate > AUDIO_VIDEO_MAX_CLK_FREQ)
544 return -EINVAL;
546 div = (rate * 2) / clk_get_rate(clk->parent);
548 /* Update div */
549 reg = __raw_readl(PLL1_SYS_BASE_ADDR) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
550 reg |= div;
551 __raw_writel(reg, PLL1_SYS_BASE_ADDR);
553 /* Wait for PLL1 to lock */
554 if (!WAIT((__raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_LOCK),
555 SPIN_DELAY))
556 panic("pll1 enable failed\n");
558 return 0;
561 static void _clk_pll1_disable(struct clk *clk)
563 void __iomem *pllbase;
564 u32 reg;
566 pll1_enabled = false;
568 /* Set PLL1 in bypass mode only. */
569 /* We need to be able to set the ARM-PODF bit
570 * when the system enters WAIT mode, and setting
571 * this bit requires PLL1_main to be enabled.
572 */
573 pllbase = _get_pll_base(clk);
575 reg = __raw_readl(pllbase);
576 reg |= ANADIG_PLL_BYPASS;
577 __raw_writel(reg, pllbase);
580 static int _clk_pll1_enable(struct clk *clk)
582 _clk_pll_enable(clk);
583 pll1_enabled = true;
584 return 0;
587 static struct clk pll1_sys_main_clk = {
588 __INIT_CLK_DEBUG(pll1_sys_main_clk)
589 .parent = &osc_clk,
590 .get_rate = _clk_pll1_main_get_rate,
591 .set_rate = _clk_pll1_main_set_rate,
592 .enable = _clk_pll1_enable,
593 .disable = _clk_pll1_disable,
596 static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
598 u32 reg;
600 reg = __raw_readl(MXC_CCM_CCSR);
602 if (parent == &pll1_sys_main_clk) {
603 reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
604 __raw_writel(reg, MXC_CCM_CCSR);
605 /* Set the step_clk parent to be lp_apm, to save power. */
606 reg = __raw_readl(MXC_CCM_CCSR);
607 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
608 } else {
610 /* Set STEP_CLK to be the parent. */
610 if (parent == &osc_clk) {
611 /* Set STEP_CLK to be sourced from LPAPM. */
612 reg = __raw_readl(MXC_CCM_CCSR);
613 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
614 __raw_writel(reg, MXC_CCM_CCSR);
615 } else {
616 /* Set STEP_CLK to be sourced from PLL2-PFD (400MHz). */
617 reg = __raw_readl(MXC_CCM_CCSR);
618 reg |= MXC_CCM_CCSR_STEP_SEL;
619 __raw_writel(reg, MXC_CCM_CCSR);
621 reg = __raw_readl(MXC_CCM_CCSR);
622 reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
624 __raw_writel(reg, MXC_CCM_CCSR);
625 return 0;
628 static unsigned long _clk_pll1_sw_get_rate(struct clk *clk)
630 return clk_get_rate(clk->parent);
633 static struct clk pll1_sw_clk = {
634 __INIT_CLK_DEBUG(pll1_sw_clk)
635 .parent = &pll1_sys_main_clk,
636 .set_parent = _clk_pll1_sw_set_parent,
637 .get_rate = _clk_pll1_sw_get_rate,
640 static unsigned long _clk_pll2_main_get_rate(struct clk *clk)
642 unsigned int div;
643 unsigned long val;
645 div = __raw_readl(PLL2_528_BASE_ADDR) & ANADIG_PLL_528_DIV_SELECT;
647 if (div == 1)
648 val = clk_get_rate(clk->parent) * 22;
650 else
651 val = clk_get_rate(clk->parent) * 20;
653 return val;
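/*
 * The single DIV_SELECT bit picks the PLL2 multiplier: with a 24MHz
 * oscillator, div = 1 gives 24 * 22 = 528MHz and div = 0 gives
 * 24 * 20 = 480MHz, matching the two rates _clk_pll2_main_set_rate
 * accepts below.
 */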
656 static int _clk_pll2_main_set_rate(struct clk *clk, unsigned long rate)
658 unsigned int reg, div;
660 if (rate == 528000000)
661 div = 1;
662 else if (rate == 480000000)
663 div = 0;
664 else
665 return -EINVAL;
667 reg = __raw_readl(PLL2_528_BASE_ADDR);
668 reg &= ~ANADIG_PLL_528_DIV_SELECT;
669 reg |= div;
670 __raw_writel(reg, PLL2_528_BASE_ADDR);
672 return 0;
675 static struct clk pll2_528_bus_main_clk = {
676 __INIT_CLK_DEBUG(pll2_528_bus_main_clk)
677 .parent = &osc_clk,
678 .get_rate = _clk_pll2_main_get_rate,
679 .set_rate = _clk_pll2_main_set_rate,
680 .enable = _clk_pll_enable,
681 .disable = _clk_pll_disable,
684 static void _clk_pll2_pfd_400M_disable(struct clk *clk)
686 if (!arm_needs_pll2_400)
687 _clk_pfd_disable(clk);
690 static struct clk pll2_pfd_400M = {
691 __INIT_CLK_DEBUG(pll2_pfd_400M)
692 .parent = &pll2_528_bus_main_clk,
693 .enable_reg = (void *)PFD_528_BASE_ADDR,
694 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
695 .enable = _clk_pfd_enable,
696 .disable = _clk_pll2_pfd_400M_disable,
697 .get_rate = pfd_get_rate,
698 .set_rate = pfd_set_rate,
700 .round_rate = pfd_round_rate,
703 static struct clk pll2_pfd_352M = {
704 __INIT_CLK_DEBUG(pll2_pfd_352M)
705 .parent = &pll2_528_bus_main_clk,
706 .enable_reg = (void *)PFD_528_BASE_ADDR,
707 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
708 .enable = _clk_pfd_enable,
709 .disable = _clk_pfd_disable,
710 .set_rate = pfd_set_rate,
711 .get_rate = pfd_get_rate,
712 .round_rate = pfd_round_rate,
715 static struct clk pll2_pfd_594M = {
716 __INIT_CLK_DEBUG(pll2_pfd_594M)
717 .parent = &pll2_528_bus_main_clk,
718 .enable_reg = (void *)PFD_528_BASE_ADDR,
719 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
720 .enable = _clk_pfd_enable,
721 .disable = _clk_pfd_disable,
722 .set_rate = pfd_set_rate,
723 .get_rate = pfd_get_rate,
724 .round_rate = pfd_round_rate,
727 static unsigned long _clk_pll2_200M_get_rate(struct clk *clk)
729 return clk_get_rate(clk->parent) / 2;
732 static struct clk pll2_200M = {
733 __INIT_CLK_DEBUG(pll2_200M)
734 .parent = &pll2_pfd_400M,
735 .get_rate = _clk_pll2_200M_get_rate,
738 static unsigned long _clk_pll3_usb_otg_get_rate(struct clk *clk)
740 unsigned int div;
741 unsigned long val;
743 div = __raw_readl(PLL3_480_USB1_BASE_ADDR)
744 & ANADIG_PLL_480_DIV_SELECT_MASK;
746 if (div == 1)
747 val = clk_get_rate(clk->parent) * 22;
748 else
749 val = clk_get_rate(clk->parent) * 20;
750 return val;
753 static int _clk_pll3_usb_otg_set_rate(struct clk *clk, unsigned long rate)
755 unsigned int reg, div;
757 if (rate == 528000000)
758 div = 1;
759 else if (rate == 480000000)
760 div = 0;
761 else
762 return -EINVAL;
764 reg = __raw_readl(PLL3_480_USB1_BASE_ADDR);
765 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
766 reg |= div;
767 __raw_writel(reg, PLL3_480_USB1_BASE_ADDR);
769 return 0;
773 /* same as pll3_main_clk. These two clocks should always be the same */
774 static struct clk pll3_usb_otg_main_clk = {
775 __INIT_CLK_DEBUG(pll3_usb_otg_main_clk)
776 .parent = &osc_clk,
777 .enable = _clk_pll_enable,
778 .disable = _clk_pll_disable,
779 .set_rate = _clk_pll3_usb_otg_set_rate,
780 .get_rate = _clk_pll3_usb_otg_get_rate,
783 /* for USB OTG */
784 static struct clk usb_phy1_clk = {
785 __INIT_CLK_DEBUG(usb_phy1_clk)
786 .parent = &pll3_usb_otg_main_clk,
787 .set_rate = _clk_pll3_usb_otg_set_rate,
788 .get_rate = _clk_pll3_usb_otg_get_rate,
791 /* For HSIC port 1 */
792 static struct clk usb_phy3_clk = {
793 __INIT_CLK_DEBUG(usb_phy3_clk)
794 .parent = &pll3_usb_otg_main_clk,
795 .set_rate = _clk_pll3_usb_otg_set_rate,
796 .get_rate = _clk_pll3_usb_otg_get_rate,
799 /* For HSIC port 2 */
800 static struct clk usb_phy4_clk = {
801 __INIT_CLK_DEBUG(usb_phy4_clk)
802 .parent = &pll3_usb_otg_main_clk,
803 .set_rate = _clk_pll3_usb_otg_set_rate,
804 .get_rate = _clk_pll3_usb_otg_get_rate,
807 static struct clk pll3_pfd_508M = {
808 __INIT_CLK_DEBUG(pll3_pfd_508M)
809 .parent = &pll3_usb_otg_main_clk,
810 .enable_reg = (void *)PFD_480_BASE_ADDR,
811 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
812 .enable = _clk_pfd_enable,
813 .disable = _clk_pfd_disable,
814 .set_rate = pfd_set_rate,
815 .get_rate = pfd_get_rate,
816 .round_rate = pfd_round_rate,
819 static struct clk pll3_pfd_454M = {
820 __INIT_CLK_DEBUG(pll3_pfd_454M)
821 .parent = &pll3_usb_otg_main_clk,
822 .enable_reg = (void *)PFD_480_BASE_ADDR,
823 .enable_shift = ANADIG_PFD3_FRAC_OFFSET,
824 .enable = _clk_pfd_enable,
825 .disable = _clk_pfd_disable,
826 .set_rate = pfd_set_rate,
827 .get_rate = pfd_get_rate,
828 .round_rate = pfd_round_rate,
831 static struct clk pll3_pfd_720M = {
832 __INIT_CLK_DEBUG(pll3_pfd_720M)
833 .parent = &pll3_usb_otg_main_clk,
834 .enable_reg = (void *)PFD_480_BASE_ADDR,
835 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
836 .enable = _clk_pfd_enable,
837 .disable = _clk_pfd_disable,
838 .set_rate = pfd_set_rate,
839 .get_rate = pfd_get_rate,
840 .round_rate = pfd_round_rate,
843 static int pfd_540M_set_rate(struct clk *clk, unsigned long rate)
845 if ((clk_get_parent(&ipu1_clk) == clk) ||
846 (clk_get_parent(&ipu2_clk) == clk) ||
847 (clk_get_parent(&axi_clk) == clk))
848 WARN(1, "Changing the rate of the 540M PFD while IPU or AXI is sourced from it\n");
851 return pfd_set_rate(clk, rate);
854 static struct clk pll3_pfd_540M = {
855 __INIT_CLK_DEBUG(pll3_pfd_540M)
856 .parent = &pll3_usb_otg_main_clk,
857 .enable_reg = (void *)PFD_480_BASE_ADDR,
858 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
859 .enable = _clk_pfd_enable,
860 .disable = _clk_pfd_disable,
861 .set_rate = pfd_540M_set_rate,
862 .get_rate = pfd_get_rate,
863 .round_rate = pfd_round_rate,
867 static unsigned long _clk_pll3_sw_get_rate(struct clk *clk)
869 return clk_get_rate(clk->parent);
872 /* same as pll3_main_clk. These two clocks should always be the same */
873 static struct clk pll3_sw_clk = {
874 __INIT_CLK_DEBUG(pll3_sw_clk)
875 .parent = &pll3_usb_otg_main_clk,
876 .get_rate = _clk_pll3_sw_get_rate,
879 static unsigned long _clk_pll3_120M_get_rate(struct clk *clk)
881 return clk_get_rate(clk->parent) / 4;
884 static struct clk pll3_120M = {
885 __INIT_CLK_DEBUG(pll3_120M)
886 .parent = &pll3_sw_clk,
887 .get_rate = _clk_pll3_120M_get_rate,
890 static unsigned long _clk_pll3_80M_get_rate(struct clk *clk)
892 return clk_get_rate(clk->parent) / 6;
895 static struct clk pll3_80M = {
896 __INIT_CLK_DEBUG(pll3_80M)
897 .parent = &pll3_sw_clk,
898 .get_rate = _clk_pll3_80M_get_rate,
901 static unsigned long _clk_pll3_60M_get_rate(struct clk *clk)
903 return clk_get_rate(clk->parent) / 8;
906 static struct clk pll3_60M = {
907 __INIT_CLK_DEBUG(pll3_60M)
908 .parent = &pll3_sw_clk,
909 .get_rate = _clk_pll3_60M_get_rate,
912 static unsigned long _clk_audio_video_get_rate(struct clk *clk)
914 unsigned int div, mfn, mfd;
915 unsigned long rate;
916 unsigned int parent_rate = clk_get_rate(clk->parent);
917 void __iomem *pllbase;
918 int rev = mx6q_revision();
919 unsigned int test_div_sel, control3, post_div = 1;
921 if (clk == &pll4_audio_main_clk)
922 pllbase = PLL4_AUDIO_BASE_ADDR;
923 else
924 pllbase = PLL5_VIDEO_BASE_ADDR;
926 if ((rev >= IMX_CHIP_REVISION_1_1) || cpu_is_mx6dl()) {
927 test_div_sel = (__raw_readl(pllbase)
928 & ANADIG_PLL_AV_TEST_DIV_SEL_MASK)
929 >> ANADIG_PLL_AV_TEST_DIV_SEL_OFFSET;
930 if (test_div_sel == 0)
931 post_div = 4;
932 else if (test_div_sel == 1)
933 post_div = 2;
935 if (clk == &pll5_video_main_clk) {
936 control3 = (__raw_readl(ANA_MISC2_BASE_ADDR)
937 & ANADIG_ANA_MISC2_CONTROL3_MASK)
938 >> ANADIG_ANA_MISC2_CONTROL3_OFFSET;
939 if (control3 == 1)
940 post_div *= 2;
941 else if (control3 == 3)
942 post_div *= 4;
946 div = __raw_readl(pllbase) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
947 mfn = __raw_readl(pllbase + PLL_NUM_DIV_OFFSET);
948 mfd = __raw_readl(pllbase + PLL_DENOM_DIV_OFFSET);
950 rate = (parent_rate * div) + ((parent_rate / mfd) * mfn);
951 rate = rate / post_div;
953 return rate;
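/*
 * Example (illustrative, assuming a 24MHz parent): div = 27,
 * mfn = 83333 and mfd = 1000000 give 24 * 27 + 24 * 0.083333,
 * roughly 650MHz, before the post divider derived from
 * test_div_sel/control3 is applied.
 */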
956 static int _clk_audio_video_set_rate(struct clk *clk, unsigned long rate)
958 unsigned int reg, div;
959 unsigned int mfn, mfd = 1000000;
960 s64 temp64;
961 unsigned int parent_rate = clk_get_rate(clk->parent);
962 void __iomem *pllbase;
963 unsigned long min_clk_rate, pre_div_rate;
964 int rev = mx6q_revision();
965 u32 test_div_sel = 2;
966 u32 control3 = 0;
968 if ((rev < IMX_CHIP_REVISION_1_1) && !cpu_is_mx6dl())
969 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ;
970 else if (clk == &pll4_audio_main_clk)
971 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ / 4;
972 else
973 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ / 16;
975 if ((rate < min_clk_rate) || (rate > AUDIO_VIDEO_MAX_CLK_FREQ))
976 return -EINVAL;
978 if (clk == &pll4_audio_main_clk)
979 pllbase = PLL4_AUDIO_BASE_ADDR;
980 else
981 pllbase = PLL5_VIDEO_BASE_ADDR;
983 pre_div_rate = rate;
984 if ((rev >= IMX_CHIP_REVISION_1_1) || cpu_is_mx6dl()) {
985 while (pre_div_rate < AUDIO_VIDEO_MIN_CLK_FREQ) {
986 pre_div_rate *= 2;
988 * test_div_sel field values:
989 * 2 -> Divide by 1
990 * 1 -> Divide by 2
991 * 0 -> Divide by 4
993 * control3 field values:
994 * 0 -> Divide by 1
995 * 1 -> Divide by 2
996 * 3 -> Divide by 4
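 *
 * Example: a requested rate of 148.5MHz is doubled three times here
 * (to 1188MHz) before it clears the 650MHz VCO minimum, leaving
 * test_div_sel = 0 (divide by 4) and control3 = 1 (divide by 2),
 * i.e. a combined post-divider of 8.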
998 if (test_div_sel != 0)
999 test_div_sel--;
1000 else {
1001 control3++;
1002 if (control3 == 2)
1003 control3++;
1008 div = pre_div_rate / parent_rate;
1009 temp64 = (u64) (pre_div_rate - (div * parent_rate));
1010 temp64 *= mfd;
1011 do_div(temp64, parent_rate);
1012 mfn = temp64;
1014 reg = __raw_readl(pllbase)
1015 & ~ANADIG_PLL_SYS_DIV_SELECT_MASK
1016 & ~ANADIG_PLL_AV_TEST_DIV_SEL_MASK;
1017 reg |= div |
1018 (test_div_sel << ANADIG_PLL_AV_TEST_DIV_SEL_OFFSET);
1019 __raw_writel(reg, pllbase);
1020 __raw_writel(mfn, pllbase + PLL_NUM_DIV_OFFSET);
1021 __raw_writel(mfd, pllbase + PLL_DENOM_DIV_OFFSET);
1023 if (rev >= IMX_CHIP_REVISION_1_1) {
1024 reg = __raw_readl(ANA_MISC2_BASE_ADDR)
1025 & ~ANADIG_ANA_MISC2_CONTROL3_MASK;
1026 reg |= control3 << ANADIG_ANA_MISC2_CONTROL3_OFFSET;
1027 __raw_writel(reg, ANA_MISC2_BASE_ADDR);
1030 return 0;
1033 static unsigned long _clk_audio_video_round_rate(struct clk *clk,
1034 unsigned long rate)
1036 unsigned long min_clk_rate;
1037 unsigned int div, post_div = 1;
1038 unsigned int mfn, mfd = 1000000;
1039 s64 temp64;
1040 unsigned int parent_rate = clk_get_rate(clk->parent);
1041 unsigned long pre_div_rate;
1042 u32 test_div_sel = 2;
1043 u32 control3 = 0;
1044 unsigned long final_rate;
1045 int rev = mx6q_revision();
1047 if ((rev < IMX_CHIP_REVISION_1_1) && !cpu_is_mx6dl())
1048 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ;
1049 else if (clk == &pll4_audio_main_clk)
1050 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ / 4;
1051 else
1052 min_clk_rate = AUDIO_VIDEO_MIN_CLK_FREQ / 16;
1054 if (rate < min_clk_rate)
1055 return min_clk_rate;
1057 if (rate > AUDIO_VIDEO_MAX_CLK_FREQ)
1058 return AUDIO_VIDEO_MAX_CLK_FREQ;
1060 pre_div_rate = rate;
1061 if ((rev >= IMX_CHIP_REVISION_1_1) || cpu_is_mx6dl()) {
1062 while (pre_div_rate < AUDIO_VIDEO_MIN_CLK_FREQ) {
1063 pre_div_rate *= 2;
1064 post_div *= 2;
1065 if (test_div_sel != 0)
1066 test_div_sel--;
1067 else {
1068 control3++;
1069 if (control3 == 2)
1070 control3++;
1075 div = pre_div_rate / parent_rate;
1076 temp64 = (u64) (pre_div_rate - (div * parent_rate));
1077 temp64 *= mfd;
1078 do_div(temp64, parent_rate);
1079 mfn = temp64;
1081 final_rate = (parent_rate * div) + ((parent_rate / mfd) * mfn);
1082 final_rate = final_rate / post_div;
1084 return final_rate;
1087 static int _clk_audio_video_set_parent(struct clk *clk, struct clk *parent)
1089 u32 reg;
1090 int mux;
1091 void __iomem *pllbase;
1093 if (clk == &pll4_audio_main_clk)
1094 pllbase = PLL4_AUDIO_BASE_ADDR;
1095 else
1096 pllbase = PLL5_VIDEO_BASE_ADDR;
1098 reg = __raw_readl(pllbase) & ~ANADIG_PLL_BYPASS_CLK_SRC_MASK;
1099 mux = _get_mux6(parent, &osc_clk, &anaclk_1, &anaclk_2,
1100 NULL, NULL, NULL);
1101 reg |= mux << ANADIG_PLL_BYPASS_CLK_SRC_OFFSET;
1102 __raw_writel(reg, pllbase);
1104 /* Set anaclk_x as input */
1105 if (parent == &anaclk_1) {
1106 reg = __raw_readl(ANADIG_MISC1_REG);
1107 reg |= (ANATOP_LVDS_CLK1_IBEN_MASK &
1108 ~ANATOP_LVDS_CLK1_OBEN_MASK);
1109 __raw_writel(reg, ANADIG_MISC1_REG);
1110 } else if (parent == &anaclk_2) {
1111 reg = __raw_readl(ANADIG_MISC1_REG);
1112 reg |= (ANATOP_LVDS_CLK2_IBEN_MASK &
1113 ~ANATOP_LVDS_CLK2_OBEN_MASK);
1114 __raw_writel(reg, ANADIG_MISC1_REG);
1117 return 0;
1120 static struct clk pll4_audio_main_clk = {
1121 __INIT_CLK_DEBUG(pll4_audio_main_clk)
1122 .parent = &osc_clk,
1123 .enable = _clk_pll_enable,
1124 .disable = _clk_pll_disable,
1125 .set_rate = _clk_audio_video_set_rate,
1126 .get_rate = _clk_audio_video_get_rate,
1127 .round_rate = _clk_audio_video_round_rate,
1128 .set_parent = _clk_audio_video_set_parent,
1131 static struct clk pll5_video_main_clk = {
1132 __INIT_CLK_DEBUG(pll5_video_main_clk)
1133 .parent = &osc_clk,
1134 .enable = _clk_pll_enable,
1135 .disable = _clk_pll_disable,
1136 .set_rate = _clk_audio_video_set_rate,
1137 .get_rate = _clk_audio_video_get_rate,
1138 .round_rate = _clk_audio_video_round_rate,
1139 .set_parent = _clk_audio_video_set_parent,
1142 static int _clk_pll_mlb_main_enable(struct clk *clk)
1144 unsigned int reg;
1145 void __iomem *pllbase;
1147 pllbase = _get_pll_base(clk);
1149 reg = __raw_readl(pllbase);
1150 reg &= ~ANADIG_PLL_BYPASS;
1152 reg = 0x0da20800;
1153 __raw_writel(reg, pllbase);
1155 return 0;
1158 static void _clk_pll_mlb_main_disable(struct clk *clk)
1160 unsigned int reg;
1161 void __iomem *pllbase;
1163 pllbase = _get_pll_base(clk);
1165 reg = __raw_readl(pllbase);
1167 reg |= ANADIG_PLL_BYPASS;
1169 __raw_writel(reg, pllbase);
1172 static struct clk pll6_mlb150_main_clk = {
1173 __INIT_CLK_DEBUG(pll6_mlb150_main_clk)
1174 .parent = &osc_clk,
1175 .enable = _clk_pll_mlb_main_enable,
1176 .disable = _clk_pll_mlb_main_disable,
1179 static unsigned long _clk_pll7_usb_otg_get_rate(struct clk *clk)
1181 unsigned int div;
1182 unsigned long val;
1184 div = __raw_readl(PLL7_480_USB2_BASE_ADDR)
1185 & ANADIG_PLL_480_DIV_SELECT_MASK;
1187 if (div == 1)
1188 val = clk_get_rate(clk->parent) * 22;
1189 else
1190 val = clk_get_rate(clk->parent) * 20;
1191 return val;
1194 static int _clk_pll7_usb_otg_set_rate(struct clk *clk, unsigned long rate)
1196 unsigned int reg, div;
1198 if (rate == 528000000)
1199 div = 1;
1200 else if (rate == 480000000)
1201 div = 0;
1202 else
1203 return -EINVAL;
1205 reg = __raw_readl(PLL7_480_USB2_BASE_ADDR);
1206 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
1207 reg |= div;
1208 __raw_writel(reg, PLL7_480_USB2_BASE_ADDR);
1210 return 0;
1213 static struct clk pll7_usb_host_main_clk = {
1214 __INIT_CLK_DEBUG(pll7_usb_host_main_clk)
1215 .parent = &osc_clk,
1216 .enable = _clk_pll_enable,
1217 .disable = _clk_pll_disable,
1218 .set_rate = _clk_pll7_usb_otg_set_rate,
1219 .get_rate = _clk_pll7_usb_otg_get_rate,
1223 static struct clk pll8_enet_main_clk = {
1224 __INIT_CLK_DEBUG(pll8_enet_main_clk)
1225 .parent = &osc_clk,
1226 .enable = _clk_pll_enable,
1227 .disable = _clk_pll_disable,
1230 static unsigned long _clk_arm_get_rate(struct clk *clk)
1232 u32 cacrr, div;
1234 cacrr = __raw_readl(MXC_CCM_CACRR);
1235 div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
1236 return clk_get_rate(clk->parent) / div;
1239 static int _clk_arm_set_rate(struct clk *clk, unsigned long rate)
1241 int i;
1242 u32 div;
1243 unsigned long parent_rate;
1244 unsigned long flags;
1245 unsigned long ipg_clk_rate, max_arm_wait_clk;
1247 for (i = 0; i < cpu_op_nr; i++) {
1248 if (rate == cpu_op_tbl[i].cpu_rate)
1249 break;
1251 if (i >= cpu_op_nr)
1252 return -EINVAL;
1254 spin_lock_irqsave(&clk_lock, flags);
1256 if (rate <= clk_get_rate(&pll2_pfd_400M)) {
1257 /* Source pll1_sw_clk from step_clk which is sourced from
1258 * PLL2_PFD_400M.
1260 if (pll1_sw_clk.parent != &pll2_pfd_400M) {
1261 pll2_pfd_400M.enable(&pll2_pfd_400M);
1262 arm_needs_pll2_400 = true;
1263 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll2_pfd_400M);
1264 pll1_sw_clk.parent = &pll2_pfd_400M;
1266 } else {
1267 /* Make sure PLL1 is enabled */
1268 if (!pll1_enabled)
1269 pll1_sys_main_clk.enable(&pll1_sys_main_clk);
1270 /* Make sure PLL1 rate is what we want */
1271 if (cpu_op_tbl[i].pll_rate != clk_get_rate(&pll1_sys_main_clk)) {
1272 /* If pll1_sw_clk is from pll1_sys_main_clk, switch it */
1273 if (pll1_sw_clk.parent == &pll1_sys_main_clk) {
1274 /* Change the PLL1 rate. */
1275 if (pll2_pfd_400M.usecount != 0)
1276 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll2_pfd_400M);
1277 else
1278 pll1_sw_clk.set_parent(&pll1_sw_clk, &osc_clk);
1280 pll1_sys_main_clk.set_rate(&pll1_sys_main_clk, cpu_op_tbl[i].pll_rate);
1282 /* Make sure pll1_sw_clk is from pll1_sys_main_clk */
1283 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll1_sys_main_clk);
1284 pll1_sw_clk.parent = &pll1_sys_main_clk;
1285 arm_needs_pll2_400 = false;
1286 if (pll2_pfd_400M.usecount == 0)
1287 pll2_pfd_400M.disable(&pll2_pfd_400M);
1289 parent_rate = clk_get_rate(clk->parent);
1290 div = parent_rate / rate;
1291 /* Calculate the ARM_PODF to be applied when the system
1292 * enters WAIT state. The max ARM clk is decided by the
1293 * ipg_clk and has to follow the ratio of ARM_CLK:IPG_CLK of 12:5.
1294 * For example, when IPG is at 66MHz, ARM_CLK cannot be greater
1295 * than 158MHz.
1296 * Pre-calculate the optimal divider now.
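 * For instance, with IPG at 66MHz the cap works out to
 * 12 * 66 / 5 = 158.4MHz, so a 792MHz parent (a typical PLL1 rate)
 * would give wait_mode_arm_podf = 5.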
1298 ipg_clk_rate = clk_get_rate(&ipg_clk);
1299 max_arm_wait_clk = (12 * ipg_clk_rate) / 5;
1300 wait_mode_arm_podf = parent_rate / max_arm_wait_clk;
1302 if (div == 0)
1303 div = 1;
1305 if ((parent_rate / div) > rate)
1306 div++;
1308 if (div > 8) {
1309 spin_unlock_irqrestore(&clk_lock, flags);
1310 return -1;
1312 /* Need PLL1-MAIN to be ON to write to ARM-PODF bit. */
1313 if (!pll1_enabled)
1314 pll1_sys_main_clk.enable(&pll1_sys_main_clk);
1316 cur_arm_podf = div;
1318 __raw_writel(div - 1, MXC_CCM_CACRR);
1320 while (__raw_readl(MXC_CCM_CDHIPR))
1323 if (pll1_sys_main_clk.usecount == 1 && arm_needs_pll2_400)
1324 pll1_sys_main_clk.disable(&pll1_sys_main_clk);
1326 spin_unlock_irqrestore(&clk_lock, flags);
1328 return 0;
1331 static struct clk cpu_clk = {
1332 __INIT_CLK_DEBUG(cpu_clk)
1333 .parent = &pll1_sw_clk,
1334 .set_rate = _clk_arm_set_rate,
1335 .get_rate = _clk_arm_get_rate,
1338 static unsigned long _clk_twd_get_rate(struct clk *clk)
1340 return clk_get_rate(clk->parent) / 2;
1343 static struct clk twd_clk = {
1344 __INIT_CLK_DEBUG(twd_clk)
1345 .parent = &cpu_clk,
1346 .get_rate = _clk_twd_get_rate,
1349 static int _clk_periph_set_parent(struct clk *clk, struct clk *parent)
1351 u32 reg;
1352 int mux;
1354 mux = _get_mux6(parent, &pll2_528_bus_main_clk, &pll2_pfd_400M,
1355 &pll2_pfd_352M, &pll2_200M, &pll3_sw_clk, &osc_clk);
1357 if (mux <= 3) {
1358 /* Set the pre_periph_clk multiplexer */
1359 reg = __raw_readl(MXC_CCM_CBCMR);
1360 reg &= ~MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_MASK;
1361 reg |= mux << MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_OFFSET;
1362 __raw_writel(reg, MXC_CCM_CBCMR);
1364 /* Set the periph_clk_sel multiplexer. */
1365 reg = __raw_readl(MXC_CCM_CBCDR);
1366 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
1367 __raw_writel(reg, MXC_CCM_CBCDR);
1368 } else {
1369 reg = __raw_readl(MXC_CCM_CBCDR);
1370 /* Set the periph_clk2_podf divider to divide by 1. */
1371 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
1372 __raw_writel(reg, MXC_CCM_CBCDR);
1374 /* Set the periph_clk2_sel mux. */
1375 reg = __raw_readl(MXC_CCM_CBCMR);
1376 reg &= ~MXC_CCM_CBCMR_PERIPH_CLK2_SEL_MASK;
1377 reg |= ((mux - 4) << MXC_CCM_CBCMR_PERIPH_CLK2_SEL_OFFSET);
1378 __raw_writel(reg, MXC_CCM_CBCMR);
1380 while (__raw_readl(MXC_CCM_CDHIPR))
1383 reg = __raw_readl(MXC_CCM_CBCDR);
1384 /* Set periph_clk_sel to select periph_clk2. */
1385 reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
1386 __raw_writel(reg, MXC_CCM_CBCDR);
1389 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1390 & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY), SPIN_DELAY))
1391 panic("_clk_periph_set_parent failed\n");
1393 return 0;
1396 static unsigned long _clk_periph_get_rate(struct clk *clk)
1398 u32 div = 1;
1399 u32 reg;
1400 unsigned long val;
1402 if ((clk->parent == &pll3_sw_clk) || (clk->parent == &osc_clk)) {
1403 reg = __raw_readl(MXC_CCM_CBCDR)
1404 & MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
1405 div = (reg >> MXC_CCM_CBCDR_PERIPH_CLK2_PODF_OFFSET) + 1;
1407 val = clk_get_rate(clk->parent) / div;
1408 return val;
1411 static struct clk periph_clk = {
1412 __INIT_CLK_DEBUG(periph_clk)
1413 .parent = &pll2_528_bus_main_clk,
1414 .set_parent = _clk_periph_set_parent,
1415 .get_rate = _clk_periph_get_rate,
1418 static unsigned long _clk_axi_get_rate(struct clk *clk)
1420 u32 div, reg;
1421 unsigned long val;
1423 reg = __raw_readl(MXC_CCM_CBCDR) & MXC_CCM_CBCDR_AXI_PODF_MASK;
1424 div = (reg >> MXC_CCM_CBCDR_AXI_PODF_OFFSET);
1426 val = clk_get_rate(clk->parent) / (div + 1);
1427 return val;
1430 static int _clk_axi_set_rate(struct clk *clk, unsigned long rate)
1432 u32 reg, div;
1433 u32 parent_rate = clk_get_rate(clk->parent);
1435 div = parent_rate / rate;
1437 if (div == 0)
1438 div++;
1439 if (((parent_rate / div) != rate) || (div > 8))
1440 return -EINVAL;
1442 reg = __raw_readl(MXC_CCM_CBCDR);
1443 reg &= ~MXC_CCM_CBCDR_AXI_PODF_MASK;
1444 reg |= (div - 1) << MXC_CCM_CBCDR_AXI_PODF_OFFSET;
1445 __raw_writel(reg, MXC_CCM_CBCDR);
1447 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1448 & MXC_CCM_CDHIPR_AXI_PODF_BUSY), SPIN_DELAY))
1449 panic("_clk_axi_set_rate failed\n");
1451 return 0;
1454 static unsigned long _clk_axi_round_rate(struct clk *clk,
1455 unsigned long rate)
1457 u32 div;
1458 u32 parent_rate = clk_get_rate(clk->parent);
1460 div = parent_rate / rate;
1462 /* Make sure rate is not greater than the maximum
1463 * value for the clock.
1464 * Also prevent a div of 0.
1467 if (div > 8)
1468 div = 8;
1469 else if (div == 0)
1470 div++;
1472 return parent_rate / div;
1475 static int _clk_axi_set_parent(struct clk *clk, struct clk *parent)
1477 u32 reg;
1478 int mux;
1480 mux = _get_mux6(parent, &periph_clk, &pll2_pfd_400M,
1481 &pll3_pfd_540M, NULL, NULL, NULL);
1483 if (mux == 0) {
1484 /* Set the AXI_SEL mux */
1485 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1486 __raw_writel(reg, MXC_CCM_CBCDR);
1487 } else {
1488 /* Set the AXI_ALT_SEL mux. */
1489 reg = __raw_readl(MXC_CCM_CBCDR)
1490 & ~MXC_CCM_CBCDR_AXI_ALT_SEL_MASK;
1491 reg |= ((mux - 1) << MXC_CCM_CBCDR_AXI_ALT_SEL_OFFSET);
1492 __raw_writel(reg, MXC_CCM_CBCDR);
1494 /* Set the AXI_SEL mux */
1495 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1496 reg |= MXC_CCM_CBCDR_AXI_SEL;
1497 __raw_writel(reg, MXC_CCM_CBCDR);
1499 return 0;
1502 static struct clk axi_clk = {
1503 __INIT_CLK_DEBUG(axi_clk)
1504 .parent = &periph_clk,
1505 .set_parent = _clk_axi_set_parent,
1506 .set_rate = _clk_axi_set_rate,
1507 .get_rate = _clk_axi_get_rate,
1508 .round_rate = _clk_axi_round_rate,
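/*
 * Note: per the ENGR00221277 summary ("Set AXI clock to 270MHz"), the
 * MX6DL/S presumably runs AXI at 270MHz; this mux/divider can produce
 * that by selecting pll3_pfd_540M as the parent with an AXI podf of 2.
 */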
1511 static unsigned long _clk_ahb_get_rate(struct clk *clk)
1513 u32 reg, div;
1515 reg = __raw_readl(MXC_CCM_CBCDR);
1516 div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
1517 MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
1519 return clk_get_rate(clk->parent) / div;
1522 static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
1524 u32 reg, div;
1525 u32 parent_rate = clk_get_rate(clk->parent);
1527 div = parent_rate / rate;
1528 if (div == 0)
1529 div++;
1530 if (((parent_rate / div) != rate) || (div > 8))
1531 return -EINVAL;
1533 reg = __raw_readl(MXC_CCM_CBCDR);
1534 reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
1535 reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
1536 __raw_writel(reg, MXC_CCM_CBCDR);
1538 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR) & MXC_CCM_CDHIPR_AHB_PODF_BUSY),
1539 SPIN_DELAY))
1540 panic("_clk_ahb_set_rate failed\n");
1542 return 0;
1545 static unsigned long _clk_ahb_round_rate(struct clk *clk,
1546 unsigned long rate)
1548 u32 div;
1549 u32 parent_rate = clk_get_rate(clk->parent);
1551 div = parent_rate / rate;
1553 /* Make sure rate is not greater than the maximum value for the clock.
1554 * Also prevent a div of 0.
1556 if (div == 0)
1557 div++;
1559 if (div > 8)
1560 div = 8;
1562 return parent_rate / div;
1565 static struct clk ahb_clk = {
1566 __INIT_CLK_DEBUG(ahb_clk)
1567 .parent = &periph_clk,
1568 .get_rate = _clk_ahb_get_rate,
1569 .set_rate = _clk_ahb_set_rate,
1570 .round_rate = _clk_ahb_round_rate,
1573 static unsigned long _clk_ipg_get_rate(struct clk *clk)
1575 u32 reg, div;
1577 reg = __raw_readl(MXC_CCM_CBCDR);
1578 div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
1579 MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
1581 return clk_get_rate(clk->parent) / div;
1585 static struct clk ipg_clk = {
1586 __INIT_CLK_DEBUG(ipg_clk)
1587 .parent = &ahb_clk,
1588 .get_rate = _clk_ipg_get_rate,
1591 static struct clk tzasc1_clk = {
1592 __INIT_CLK_DEBUG(tzasc1_clk)
1593 .id = 0,
1594 .parent = &ipg_clk,
1595 .enable_reg = MXC_CCM_CCGR2,
1596 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1597 .enable = _clk_enable,
1598 .disable = _clk_disable_inwait,
1601 static struct clk tzasc2_clk = {
1602 __INIT_CLK_DEBUG(tzasc2_clk)
1603 .id = 0,
1604 .parent = &ipg_clk,
1605 .enable_reg = MXC_CCM_CCGR2,
1606 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
1607 .enable = _clk_enable,
1608 .disable = _clk_disable_inwait,
1611 static struct clk mx6fast1_clk = {
1612 __INIT_CLK_DEBUG(mx6fast1_clk)
1613 .id = 0,
1614 .parent = &ahb_clk,
1615 .enable_reg = MXC_CCM_CCGR4,
1616 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1617 .enable = _clk_enable,
1618 .disable = _clk_disable_inwait,
1621 static struct clk mx6per1_clk = {
1622 __INIT_CLK_DEBUG(mx6per1_clk)
1623 .id = 0,
1624 .parent = &ahb_clk,
1625 .secondary = &mx6fast1_clk,
1626 .enable_reg = MXC_CCM_CCGR4,
1627 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1628 .enable = _clk_enable,
1629 .disable = _clk_disable_inwait,
1632 static struct clk mx6per2_clk = {
1633 __INIT_CLK_DEBUG(mx6per2_clk)
1634 .id = 0,
1635 .parent = &ahb_clk,
1636 .enable_reg = MXC_CCM_CCGR4,
1637 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1638 .enable = _clk_enable,
1639 .disable = _clk_disable_inwait,
1642 static unsigned long _clk_mmdc_ch0_axi_get_rate(struct clk *clk)
1644 u32 reg, div;
1646 reg = __raw_readl(MXC_CCM_CBCDR);
1647 div = ((reg & MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK) >>
1648 MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET) + 1;
1650 return clk_get_rate(clk->parent) / div;
1653 static int _clk_mmdc_ch0_axi_set_rate(struct clk *clk, unsigned long rate)
1655 u32 reg, div;
1656 u32 parent_rate = clk_get_rate(clk->parent);
1658 div = parent_rate / rate;
1659 if (div == 0)
1660 div++;
1661 if (((parent_rate / div) != rate) || (div > 8))
1662 return -EINVAL;
1664 reg = __raw_readl(MXC_CCM_CBCDR);
1665 reg &= ~MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK;
1666 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET;
1667 __raw_writel(reg, MXC_CCM_CBCDR);
1669 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1670 & MXC_CCM_CDHIPR_MMDC_CH0_PODF_BUSY),
1671 SPIN_DELAY))
1672 panic("_clk_mmdc_ch0_axi_set_rate failed\n");
1674 return 0;
1677 static unsigned long _clk_mmdc_ch0_axi_round_rate(struct clk *clk,
1678 unsigned long rate)
1680 u32 div;
1681 u32 parent_rate = clk_get_rate(clk->parent);
1683 div = parent_rate / rate;
1685 /* Make sure rate is not greater than the maximum value for the clock.
1686 * Also prevent a div of 0.
1688 if (div == 0)
1689 div++;
1691 if (div > 8)
1692 div = 8;
1694 return parent_rate / div;
1697 static struct clk mmdc_ch0_axi_clk[] = {
1699 __INIT_CLK_DEBUG(mmdc_ch0_axi_clk)
1700 .id = 0,
1701 .parent = &periph_clk,
1702 .enable = _clk_enable,
1703 .disable = _clk_disable_inwait,
1704 .enable_reg = MXC_CCM_CCGR3,
1705 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1706 .secondary = &mmdc_ch0_axi_clk[1],
1707 .get_rate = _clk_mmdc_ch0_axi_get_rate,
1708 .set_rate = _clk_mmdc_ch0_axi_set_rate,
1709 .round_rate = _clk_mmdc_ch0_axi_round_rate,
1712 __INIT_CLK_DEBUG(mmdc_ch0_ipg_clk)
1713 .id = 0,
1714 .parent = &ipg_clk,
1715 .enable = _clk_enable,
1716 .disable = _clk_disable_inwait,
1717 .enable_reg = MXC_CCM_CCGR3,
1718 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
1719 .secondary = &tzasc1_clk,
1723 static unsigned long _clk_mmdc_ch1_axi_get_rate(struct clk *clk)
1725 u32 reg, div;
1727 reg = __raw_readl(MXC_CCM_CBCDR);
1728 div = ((reg & MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK) >>
1729 MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET) + 1;
1731 return clk_get_rate(clk->parent) / div;
1734 static int _clk_mmdc_ch1_axi_set_rate(struct clk *clk, unsigned long rate)
1736 u32 reg, div;
1737 u32 parent_rate = clk_get_rate(clk->parent);
1739 div = parent_rate / rate;
1740 if (div == 0)
1741 div++;
1742 if (((parent_rate / div) != rate) || (div > 8))
1743 return -EINVAL;
1745 reg = __raw_readl(MXC_CCM_CBCDR);
1746 reg &= ~MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK;
1747 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET;
1748 __raw_writel(reg, MXC_CCM_CBCDR);
1750 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1751 & MXC_CCM_CDHIPR_MMDC_CH1_PODF_BUSY), SPIN_DELAY))
1752 panic("_clk_mmdc_ch1_axi_set_rate failed\n");
1754 return 0;
1757 static unsigned long _clk_mmdc_ch1_axi_round_rate(struct clk *clk,
1758 unsigned long rate)
1760 u32 div;
1761 u32 parent_rate = clk_get_rate(clk->parent);
1763 div = parent_rate / rate;
1765 /* Make sure rate is not greater than the maximum value for the clock.
1766 * Also prevent a div of 0.
1768 if (div == 0)
1769 div++;
1771 if (div > 8)
1772 div = 8;
1774 return parent_rate / div;
1777 static struct clk mmdc_ch1_axi_clk[] = {
1779 __INIT_CLK_DEBUG(mmdc_ch1_axi_clk)
1780 .id = 0,
1781 .parent = &pll2_pfd_400M,
1782 .enable = _clk_enable,
1783 .disable = _clk_disable,
1784 .enable_reg = MXC_CCM_CCGR3,
1785 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1786 .secondary = &mmdc_ch1_axi_clk[1],
1787 .get_rate = _clk_mmdc_ch1_axi_get_rate,
1788 .set_rate = _clk_mmdc_ch1_axi_set_rate,
1789 .round_rate = _clk_mmdc_ch1_axi_round_rate,
1792 .id = 1,
1793 __INIT_CLK_DEBUG(mmdc_ch1_ipg_clk)
1794 .parent = &ipg_clk,
1795 .enable = _clk_enable,
1796 .disable = _clk_disable,
1797 .enable_reg = MXC_CCM_CCGR3,
1798 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1799 .secondary = &tzasc2_clk,
1803 static struct clk ocram_clk = {
1804 __INIT_CLK_DEBUG(ocram_clk)
1805 .id = 0,
1806 .parent = &ahb_clk,
1807 .enable_reg = MXC_CCM_CCGR3,
1808 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
1809 .enable = _clk_enable,
1810 .disable = _clk_disable_inwait,
1813 static unsigned long _clk_ipg_perclk_get_rate(struct clk *clk)
1815 u32 reg, div;
1817 reg = __raw_readl(MXC_CCM_CSCMR1);
1818 div = ((reg & MXC_CCM_CSCMR1_PERCLK_PODF_MASK) >>
1819 MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET) + 1;
1821 return clk_get_rate(clk->parent) / div;
1824 static int _clk_ipg_perclk_set_rate(struct clk *clk, unsigned long rate)
1826 u32 reg, div;
1827 u32 parent_rate = clk_get_rate(clk->parent);
1829 div = parent_rate / rate;
1830 if (div == 0)
1831 div++;
1832 if (((parent_rate / div) != rate) || (div > 64))
1833 return -EINVAL;
1835 reg = __raw_readl(MXC_CCM_CSCMR1);
1836 reg &= ~MXC_CCM_CSCMR1_PERCLK_PODF_MASK;
1837 reg |= (div - 1) << MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET;
1838 __raw_writel(reg, MXC_CCM_CSCMR1);
1840 return 0;
1844 static unsigned long _clk_ipg_perclk_round_rate(struct clk *clk,
1845 unsigned long rate)
1847 u32 div;
1848 u32 parent_rate = clk_get_rate(clk->parent);
1850 div = parent_rate / rate;
1852 /* Make sure rate is not greater than the maximum value for the clock.
1853 * Also prevent a div of 0.
1855 if (div == 0)
1856 div++;
1858 if (div > 64)
1859 div = 64;
1861 return parent_rate / div;
1864 static struct clk ipg_perclk = {
1865 __INIT_CLK_DEBUG(ipg_perclk)
1866 .parent = &ipg_clk,
1867 .get_rate = _clk_ipg_perclk_get_rate,
1868 .set_rate = _clk_ipg_perclk_set_rate,
1869 .round_rate = _clk_ipg_perclk_round_rate,
1872 static struct clk spba_clk = {
1873 __INIT_CLK_DEBUG(spba_clk)
1874 .parent = &ipg_clk,
1875 .enable_reg = MXC_CCM_CCGR5,
1876 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1877 .enable = _clk_enable,
1878 .disable = _clk_disable,
1881 static struct clk sdma_clk[] = {
1883 __INIT_CLK_DEBUG(sdma_clk)
1884 .parent = &ahb_clk,
1885 .enable_reg = MXC_CCM_CCGR5,
1886 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1887 .enable = _clk_enable,
1888 .disable = _clk_disable,
1889 .secondary = &sdma_clk[1],
1892 .parent = &mx6per1_clk,
1893 #ifdef CONFIG_SDMA_IRAM
1894 .secondary = &ocram_clk,
1895 #else
1896 .secondary = &mmdc_ch0_axi_clk[0],
1897 #endif
1901 static int _clk_gpu2d_axi_set_parent(struct clk *clk, struct clk *parent)
1903 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1905 if (parent == &ahb_clk)
1906 reg |= MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1908 __raw_writel(reg, MXC_CCM_CBCMR);
1910 return 0;
1913 static struct clk gpu2d_axi_clk = {
1914 __INIT_CLK_DEBUG(gpu2d_axi_clk)
1915 .parent = &axi_clk,
1916 .set_parent = _clk_gpu2d_axi_set_parent,
1919 static int _clk_gpu3d_axi_set_parent(struct clk *clk, struct clk *parent)
1921 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1923 if (parent == &ahb_clk)
1924 reg |= MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1926 __raw_writel(reg, MXC_CCM_CBCMR);
1928 return 0;
1931 static struct clk gpu3d_axi_clk = {
1932 __INIT_CLK_DEBUG(gpu3d_axi_clk)
1933 .parent = &axi_clk,
1934 .secondary = &mmdc_ch0_axi_clk[0],
1935 .set_parent = _clk_gpu3d_axi_set_parent,
1938 static int _clk_pcie_axi_set_parent(struct clk *clk, struct clk *parent)
1940 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1942 if (parent == &ahb_clk)
1943 reg |= MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1945 __raw_writel(reg, MXC_CCM_CBCMR);
1947 return 0;
1950 static struct clk pcie_axi_clk = {
1951 __INIT_CLK_DEBUG(pcie_axi_clk)
1952 .parent = &axi_clk,
1953 .set_parent = _clk_pcie_axi_set_parent,
1956 static int _clk_vdo_axi_set_parent(struct clk *clk, struct clk *parent)
1958 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1960 if (parent == &ahb_clk)
1961 reg |= MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1963 __raw_writel(reg, MXC_CCM_CBCMR);
1965 return 0;
1968 static struct clk vdo_axi_clk = {
1969 __INIT_CLK_DEBUG(vdo_axi_clk)
1970 .parent = &axi_clk,
1971 .enable_reg = MXC_CCM_CCGR6,
1972 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1973 .enable = _clk_enable,
1974 .disable = _clk_disable,
1975 .set_parent = _clk_vdo_axi_set_parent,
1978 static struct clk vdoa_clk[] = {
1980 __INIT_CLK_DEBUG(vdoa_clk)
1981 .id = 0,
1982 .parent = &vdo_axi_clk,
1983 .enable_reg = MXC_CCM_CCGR2,
1984 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1985 .enable = _clk_enable,
1986 .disable = _clk_disable,
1987 .secondary = &vdoa_clk[1],
1988 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
1991 .parent = &mmdc_ch0_axi_clk[0],
1992 .secondary = &vdoa_clk[2],
1995 .parent = &mx6fast1_clk,
1996 .secondary = &ocram_clk,
2000 static unsigned long mx6_timer_rate(void)
2002 u32 parent_rate = clk_get_rate(&osc_clk);
2004 u32 reg = __raw_readl(timer_base + MXC_TCTL);
2005 u32 div;
2007 if ((reg & V2_TCTL_CLK_OSC_DIV8) == V2_TCTL_CLK_OSC_DIV8) {
2008 if (cpu_is_mx6q())
2009 /* For MX6Q, the only options are 24MHz or 24MHz/8. */
2010 return parent_rate / 8;
2011 else {
2012 /* For MX6DL/S and MX6Solo, the rate is based on the
2013 * divider value set in the prescaler register. */
2014 div = __raw_readl(timer_base + MXC_TPRER);
2015 div = (div >> V2_TPRER_PRE24M_OFFSET) &
2016 V2_TPRER_PRE24M_MASK;
2017 return parent_rate / (div + 1);
2020 return 0;
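/*
 * Illustration (assuming the usual 24MHz oscillator): with OSC_DIV8
 * selected, MX6Q reports 3MHz directly, while MX6DL/S returns
 * 24MHz / (PRE24M + 1), e.g. 3MHz when the prescaler field holds 7.
 */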
2023 static unsigned long _clk_gpt_get_rate(struct clk *clk)
2025 unsigned long rate;
2027 if (mx6q_revision() == IMX_CHIP_REVISION_1_0)
2028 return clk_get_rate(clk->parent);
2030 rate = mx6_timer_rate();
2031 if (!rate)
2032 return clk_get_rate(clk->parent);
2034 return rate;
2037 static struct clk gpt_clk[] = {
2039 __INIT_CLK_DEBUG(gpt_clk)
2040 .parent = &osc_clk,
2041 .id = 0,
2042 .enable_reg = MXC_CCM_CCGR1,
2043 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2044 .enable = _clk_enable,
2045 .disable = _clk_disable,
2046 .get_rate = _clk_gpt_get_rate,
2047 .secondary = &gpt_clk[1],
2050 __INIT_CLK_DEBUG(gpt_serial_clk)
2051 .id = 0,
2052 .enable_reg = MXC_CCM_CCGR1,
2053 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2054 .enable = _clk_enable,
2055 .disable = _clk_disable,
2059 static unsigned long _clk_iim_get_rate(struct clk *clk)
2061 return clk_get_rate(clk->parent);
2064 static struct clk iim_clk = {
2065 __INIT_CLK_DEBUG(iim_clk)
2066 .parent = &ipg_clk,
2067 .enable = _clk_enable,
2068 .enable_reg = MXC_CCM_CCGR2,
2069 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
2070 .disable = _clk_disable,
2071 .get_rate = _clk_iim_get_rate,
2074 static struct clk i2c_clk[] = {
2076 __INIT_CLK_DEBUG(i2c_clk_0)
2077 .id = 0,
2078 .parent = &ipg_perclk,
2079 .enable_reg = MXC_CCM_CCGR2,
2080 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
2081 .enable = _clk_enable,
2082 .disable = _clk_disable,
2085 __INIT_CLK_DEBUG(i2c_clk_1)
2086 .id = 1,
2087 .parent = &ipg_perclk,
2088 .enable_reg = MXC_CCM_CCGR2,
2089 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
2090 .enable = _clk_enable,
2091 .disable = _clk_disable,
2094 __INIT_CLK_DEBUG(i2c_clk_2)
2095 .id = 2,
2096 .parent = &ipg_perclk,
2097 .enable_reg = MXC_CCM_CCGR2,
2098 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
2099 .enable = _clk_enable,
2100 .disable = _clk_disable,
2104 static int _clk_vpu_axi_set_parent(struct clk *clk, struct clk *parent)
2106 int mux;
2107 u32 reg = __raw_readl(MXC_CCM_CBCMR)
2108 & ~MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_MASK;
2110 mux = _get_mux6(parent, &axi_clk, &pll2_pfd_400M,
2111 &pll2_pfd_352M, NULL, NULL, NULL);
2113 reg |= (mux << MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_OFFSET);
2115 __raw_writel(reg, MXC_CCM_CBCMR);
2117 return 0;
2120 static unsigned long _clk_vpu_axi_get_rate(struct clk *clk)
2122 u32 reg, div;
2124 reg = __raw_readl(MXC_CCM_CSCDR1);
2125 div = ((reg & MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK) >>
2126 MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET) + 1;
2128 return clk_get_rate(clk->parent) / div;
2131 static int _clk_vpu_axi_set_rate(struct clk *clk, unsigned long rate)
2133 u32 reg, div;
2134 u32 parent_rate = clk_get_rate(clk->parent);
2136 div = parent_rate / rate;
2137 if (div == 0)
2138 div++;
2139 if (((parent_rate / div) != rate) || (div > 8))
2140 return -EINVAL;
2142 reg = __raw_readl(MXC_CCM_CSCDR1);
2143 reg &= ~MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK;
2144 reg |= (div - 1) << MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET;
2145 __raw_writel(reg, MXC_CCM_CSCDR1);
2147 return 0;
2150 static unsigned long _clk_vpu_axi_round_rate(struct clk *clk,
2151 unsigned long rate)
2153 u32 div;
2154 u32 parent_rate = clk_get_rate(clk->parent);
2156 div = parent_rate / rate;
2158 /* Make sure rate is not greater than the maximum value for the clock.
2159 * Also prevent a div of 0.
2161 if (div == 0)
2162 div++;
2164 if (div > 8)
2165 div = 8;
2167 return parent_rate / div;
2170 static struct clk vpu_clk[] = {
2172 __INIT_CLK_DEBUG(vpu_clk)
2173 .parent = &axi_clk,
2174 .enable_reg = MXC_CCM_CCGR6,
2175 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2176 .enable = _clk_enable,
2177 .disable = _clk_disable,
2178 .set_parent = _clk_vpu_axi_set_parent,
2179 .round_rate = _clk_vpu_axi_round_rate,
2180 .set_rate = _clk_vpu_axi_set_rate,
2181 .get_rate = _clk_vpu_axi_get_rate,
2182 .secondary = &vpu_clk[1],
2183 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2186 .parent = &mmdc_ch0_axi_clk[0],
2187 .secondary = &vpu_clk[2],
2190 .parent = &mx6fast1_clk,
2191 .secondary = &ocram_clk,
2195 static int _clk_ipu1_set_parent(struct clk *clk, struct clk *parent)
2197 int mux;
2198 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
2199 & ~MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_MASK;
2201 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2202 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
2204 reg |= (mux << MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_OFFSET);
2206 __raw_writel(reg, MXC_CCM_CSCDR3);
2208 return 0;
2211 static unsigned long _clk_ipu1_get_rate(struct clk *clk)
2213 u32 reg, div;
2215 reg = __raw_readl(MXC_CCM_CSCDR3);
2216 div = ((reg & MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK) >>
2217 MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET) + 1;
2219 return clk_get_rate(clk->parent) / div;
2222 static int _clk_ipu1_set_rate(struct clk *clk, unsigned long rate)
2224 u32 reg, div;
2225 u32 parent_rate = clk_get_rate(clk->parent);
2227 div = parent_rate / rate;
2228 if (div == 0)
2229 div++;
2230 if (((parent_rate / div) != rate) || (div > 8))
2231 return -EINVAL;
2233 reg = __raw_readl(MXC_CCM_CSCDR3);
2234 reg &= ~MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK;
2235 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET;
2236 __raw_writel(reg, MXC_CCM_CSCDR3);
2238 return 0;
2241 static unsigned long _clk_ipu_round_rate(struct clk *clk,
2242 unsigned long rate)
2244 u32 div;
2245 u32 parent_rate = clk_get_rate(clk->parent);
2247 div = parent_rate / rate;
2249 /* Make sure rate is not greater than the maximum value for the clock.
2250 * Also prevent a div of 0.
2251 */
2252 if (div == 0)
2253 div++;
2255 if (div > 8)
2256 div = 8;
2258 return parent_rate / div;
2261 static struct clk ipu1_clk = {
2262 __INIT_CLK_DEBUG(ipu1_clk)
2263 .parent = &mmdc_ch0_axi_clk[0],
2264 .secondary = &mmdc_ch0_axi_clk[0],
2265 .enable_reg = MXC_CCM_CCGR3,
2266 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
2267 .enable = _clk_enable,
2268 .disable = _clk_disable,
2269 .set_parent = _clk_ipu1_set_parent,
2270 .round_rate = _clk_ipu_round_rate,
2271 .set_rate = _clk_ipu1_set_rate,
2272 .get_rate = _clk_ipu1_get_rate,
2273 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2276 static int _clk_ipu2_set_parent(struct clk *clk, struct clk *parent)
2278 int mux;
2279 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
2280 & ~MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_MASK;
2282 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2283 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
2285 reg |= (mux << MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_OFFSET);
2287 __raw_writel(reg, MXC_CCM_CSCDR3);
2289 return 0;
2292 static unsigned long _clk_ipu2_get_rate(struct clk *clk)
2294 u32 reg, div;
2296 reg = __raw_readl(MXC_CCM_CSCDR3);
2297 div = ((reg & MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK) >>
2298 MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET) + 1;
2300 return clk_get_rate(clk->parent) / div;
2303 static int _clk_ipu2_set_rate(struct clk *clk, unsigned long rate)
2305 u32 reg, div;
2306 u32 parent_rate = clk_get_rate(clk->parent);
2308 div = parent_rate / rate;
2309 if (div == 0)
2310 div++;
2311 if (((parent_rate / div) != rate) || (div > 8))
2312 return -EINVAL;
2314 reg = __raw_readl(MXC_CCM_CSCDR3);
2315 reg &= ~MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK;
2316 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET;
2317 __raw_writel(reg, MXC_CCM_CSCDR3);
2319 return 0;
2322 static struct clk ipu2_clk = {
2323 __INIT_CLK_DEBUG(ipu2_clk)
2324 .parent = &mmdc_ch0_axi_clk[0],
2325 .secondary = &mmdc_ch0_axi_clk[0],
2326 .enable_reg = MXC_CCM_CCGR3,
2327 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
2328 .enable = _clk_enable,
2329 .disable = _clk_disable,
2330 .set_parent = _clk_ipu2_set_parent,
2331 .round_rate = _clk_ipu_round_rate,
2332 .set_rate = _clk_ipu2_set_rate,
2333 .get_rate = _clk_ipu2_get_rate,
2334 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2337 static struct clk usdhc_dep_clk = {
2338 .parent = &mmdc_ch0_axi_clk[0],
2339 .secondary = &mx6per1_clk,
2342 static unsigned long _clk_usdhc_round_rate(struct clk *clk,
2343 unsigned long rate)
2345 u32 div;
2346 u32 parent_rate = clk_get_rate(clk->parent);
2348 div = parent_rate / rate;
2350 /* Make sure rate is not greater than the maximum value for the clock.
2351 * Also prevent a div of 0.
2352 */
2353 if (div == 0)
2354 div++;
2356 if (div > 8)
2357 div = 8;
2359 return parent_rate / div;
2362 static int _clk_usdhc1_set_parent(struct clk *clk, struct clk *parent)
2364 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC1_CLK_SEL;
2366 if (parent == &pll2_pfd_352M)
2367 reg |= (MXC_CCM_CSCMR1_USDHC1_CLK_SEL);
2369 __raw_writel(reg, MXC_CCM_CSCMR1);
2371 return 0;
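/*
 * Each uSDHC instance has a single CSCMR1 select bit: cleared, the root
 * runs from pll2_pfd_400M (the default parent used below); set, it runs
 * from pll2_pfd_352M.  Passing any other parent simply leaves the bit
 * cleared.
 */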
2374 static unsigned long _clk_usdhc1_get_rate(struct clk *clk)
2376 u32 reg, div;
2378 reg = __raw_readl(MXC_CCM_CSCDR1);
2379 div = ((reg & MXC_CCM_CSCDR1_USDHC1_PODF_MASK) >>
2380 MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET) + 1;
2382 return clk_get_rate(clk->parent) / div;
2385 static int _clk_usdhc1_set_rate(struct clk *clk, unsigned long rate)
2387 u32 reg, div;
2388 u32 parent_rate = clk_get_rate(clk->parent);
2390 div = parent_rate / rate;
2391 if (div == 0)
2392 div++;
2393 if (((parent_rate / div) != rate) || (div > 8))
2394 return -EINVAL;
2396 reg = __raw_readl(MXC_CCM_CSCDR1);
2397 reg &= ~MXC_CCM_CSCDR1_USDHC1_PODF_MASK;
2398 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET;
2399 __raw_writel(reg, MXC_CCM_CSCDR1);
2401 return 0;
2404 static struct clk usdhc1_clk = {
2405 __INIT_CLK_DEBUG(usdhc1_clk)
2406 .id = 0,
2407 .parent = &pll2_pfd_400M,
2408 .secondary = &usdhc_dep_clk,
2409 .enable_reg = MXC_CCM_CCGR6,
2410 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2411 .enable = _clk_enable,
2412 .disable = _clk_disable,
2413 .set_parent = _clk_usdhc1_set_parent,
2414 .round_rate = _clk_usdhc_round_rate,
2415 .set_rate = _clk_usdhc1_set_rate,
2416 .get_rate = _clk_usdhc1_get_rate,
2419 static int _clk_usdhc2_set_parent(struct clk *clk, struct clk *parent)
2421 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC2_CLK_SEL;
2423 if (parent == &pll2_pfd_352M)
2424 reg |= (MXC_CCM_CSCMR1_USDHC2_CLK_SEL);
2426 __raw_writel(reg, MXC_CCM_CSCMR1);
2428 return 0;
2431 static unsigned long _clk_usdhc2_get_rate(struct clk *clk)
2433 u32 reg, div;
2435 reg = __raw_readl(MXC_CCM_CSCDR1);
2436 div = ((reg & MXC_CCM_CSCDR1_USDHC2_PODF_MASK) >>
2437 MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET) + 1;
2439 return clk_get_rate(clk->parent) / div;
2442 static int _clk_usdhc2_set_rate(struct clk *clk, unsigned long rate)
2444 u32 reg, div;
2445 u32 parent_rate = clk_get_rate(clk->parent);
2447 div = parent_rate / rate;
2448 if (div == 0)
2449 div++;
2450 if (((parent_rate / div) != rate) || (div > 8))
2451 return -EINVAL;
2453 reg = __raw_readl(MXC_CCM_CSCDR1);
2454 reg &= ~MXC_CCM_CSCDR1_USDHC2_PODF_MASK;
2455 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET;
2456 __raw_writel(reg, MXC_CCM_CSCDR1);
2458 return 0;
2461 static struct clk usdhc2_clk = {
2462 __INIT_CLK_DEBUG(usdhc2_clk)
2463 .id = 1,
2464 .parent = &pll2_pfd_400M,
2465 .secondary = &usdhc_dep_clk,
2466 .enable_reg = MXC_CCM_CCGR6,
2467 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2468 .enable = _clk_enable,
2469 .disable = _clk_disable,
2470 .set_parent = _clk_usdhc2_set_parent,
2471 .round_rate = _clk_usdhc_round_rate,
2472 .set_rate = _clk_usdhc2_set_rate,
2473 .get_rate = _clk_usdhc2_get_rate,
2476 static int _clk_usdhc3_set_parent(struct clk *clk, struct clk *parent)
2478 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC3_CLK_SEL;
2480 if (parent == &pll2_pfd_352M)
2481 reg |= (MXC_CCM_CSCMR1_USDHC3_CLK_SEL);
2483 __raw_writel(reg, MXC_CCM_CSCMR1);
2485 return 0;
2488 static unsigned long _clk_usdhc3_get_rate(struct clk *clk)
2490 u32 reg, div;
2492 reg = __raw_readl(MXC_CCM_CSCDR1);
2493 div = ((reg & MXC_CCM_CSCDR1_USDHC3_PODF_MASK) >>
2494 MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET) + 1;
2496 return clk_get_rate(clk->parent) / div;
2499 static int _clk_usdhc3_set_rate(struct clk *clk, unsigned long rate)
2501 u32 reg, div;
2502 u32 parent_rate = clk_get_rate(clk->parent);
2504 div = parent_rate / rate;
2505 if (div == 0)
2506 div++;
2507 if (((parent_rate / div) != rate) || (div > 8))
2508 return -EINVAL;
2510 reg = __raw_readl(MXC_CCM_CSCDR1);
2511 reg &= ~MXC_CCM_CSCDR1_USDHC3_PODF_MASK;
2512 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET;
2513 __raw_writel(reg, MXC_CCM_CSCDR1);
2515 return 0;
2519 static struct clk usdhc3_clk = {
2520 __INIT_CLK_DEBUG(usdhc3_clk)
2521 .id = 2,
2522 .parent = &pll2_pfd_400M,
2523 .secondary = &usdhc_dep_clk,
2524 .enable_reg = MXC_CCM_CCGR6,
2525 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
2526 .enable = _clk_enable,
2527 .disable = _clk_disable,
2528 .set_parent = _clk_usdhc3_set_parent,
2529 .round_rate = _clk_usdhc_round_rate,
2530 .set_rate = _clk_usdhc3_set_rate,
2531 .get_rate = _clk_usdhc3_get_rate,
2534 static int _clk_usdhc4_set_parent(struct clk *clk, struct clk *parent)
2536 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC4_CLK_SEL;
2538 if (parent == &pll2_pfd_352M)
2539 reg |= (MXC_CCM_CSCMR1_USDHC4_CLK_SEL);
2541 __raw_writel(reg, MXC_CCM_CSCMR1);
2543 return 0;
2546 static unsigned long _clk_usdhc4_get_rate(struct clk *clk)
2548 u32 reg, div;
2550 reg = __raw_readl(MXC_CCM_CSCDR1);
2551 div = ((reg & MXC_CCM_CSCDR1_USDHC4_PODF_MASK) >>
2552 MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET) + 1;
2554 return clk_get_rate(clk->parent) / div;
2557 static int _clk_usdhc4_set_rate(struct clk *clk, unsigned long rate)
2559 u32 reg, div;
2560 u32 parent_rate = clk_get_rate(clk->parent);
2562 div = parent_rate / rate;
2563 if (div == 0)
2564 div++;
2565 if (((parent_rate / div) != rate) || (div > 8))
2566 return -EINVAL;
2568 reg = __raw_readl(MXC_CCM_CSCDR1);
2569 reg &= ~MXC_CCM_CSCDR1_USDHC4_PODF_MASK;
2570 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET;
2571 __raw_writel(reg, MXC_CCM_CSCDR1);
2573 return 0;
2577 static struct clk usdhc4_clk = {
2578 __INIT_CLK_DEBUG(usdhc4_clk)
2579 .id = 3,
2580 .parent = &pll2_pfd_400M,
2581 .secondary = &usdhc_dep_clk,
2582 .enable_reg = MXC_CCM_CCGR6,
2583 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
2584 .enable = _clk_enable,
2585 .disable = _clk_disable,
2586 .set_parent = _clk_usdhc4_set_parent,
2587 .round_rate = _clk_usdhc_round_rate,
2588 .set_rate = _clk_usdhc4_set_rate,
2589 .get_rate = _clk_usdhc4_get_rate,
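/*
 * Hedged usage sketch (not part of this file): a host-controller driver
 * would normally look one of these uSDHC clocks up through the clkdev table
 * at the end of this file (the exact con_id/dev_id strings are defined
 * there, not assumed here) and then gate and scale it through the generic
 * clk API, roughly:
 *
 *	struct clk *clk = clk_get(&pdev->dev, NULL);
 *	clk_enable(clk);
 *	clk_set_rate(clk, clk_round_rate(clk, 198000000));
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 *
 * With pll2_pfd_400M (which actually runs near 396 MHz) as parent,
 * 198000000 is an exact divide-by-2 and is accepted; a rate that is not an
 * integer division of the parent is rejected by _clk_usdhc*_set_rate() with
 * -EINVAL.
 */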
2592 static unsigned long _clk_ssi_round_rate(struct clk *clk,
2593 unsigned long rate)
2595 u32 pre, post;
2596 u32 parent_rate = clk_get_rate(clk->parent);
2597 u32 div = parent_rate / rate;
2599 if (parent_rate % rate)
2600 div++;
2602 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2604 return parent_rate / (pre * post);
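/*
 * The SSI root divider is split across two fields in CS1CDR/CS2CDR: a 3-bit
 * PRED (divide by 1..8) and a 6-bit PODF (divide by 1..64), so
 * __calc_pre_post_dividers() can realise combined divides of up to 512,
 * which is what the "div > 512" check in the set_rate callbacks below
 * enforces.
 */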
2607 static unsigned long _clk_ssi1_get_rate(struct clk *clk)
2609 u32 reg, prediv, podf;
2611 reg = __raw_readl(MXC_CCM_CS1CDR);
2613 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2614 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2615 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2616 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2618 return clk_get_rate(clk->parent) / (prediv * podf);
2621 static int _clk_ssi1_set_rate(struct clk *clk, unsigned long rate)
2623 u32 reg, div, pre, post;
2624 u32 parent_rate = clk_get_rate(clk->parent);
2626 div = parent_rate / rate;
2627 if (div == 0)
2628 div++;
2629 if (((parent_rate / div) != rate) || div > 512)
2630 return -EINVAL;
2632 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2634 reg = __raw_readl(MXC_CCM_CS1CDR);
2635 reg &= ~(MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK |
2636 MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK);
2637 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET;
2638 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET;
2640 __raw_writel(reg, MXC_CCM_CS1CDR);
2642 return 0;
2646 static int _clk_ssi1_set_parent(struct clk *clk, struct clk *parent)
2648 u32 reg, mux;
2650 reg = __raw_readl(MXC_CCM_CSCMR1)
2651 & ~MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK;
2653 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2654 &pll4_audio_main_clk, NULL, NULL, NULL);
2655 reg |= (mux << MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET);
2657 __raw_writel(reg, MXC_CCM_CSCMR1);
2659 return 0;
2662 static struct clk ssi1_clk = {
2663 __INIT_CLK_DEBUG(ssi1_clk)
2664 .parent = &pll3_pfd_508M,
2665 .enable_reg = MXC_CCM_CCGR5,
2666 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2667 .enable = _clk_enable,
2668 .disable = _clk_disable,
2669 .set_parent = _clk_ssi1_set_parent,
2670 .set_rate = _clk_ssi1_set_rate,
2671 .round_rate = _clk_ssi_round_rate,
2672 .get_rate = _clk_ssi1_get_rate,
2673 #ifdef CONFIG_SND_MXC_SOC_IRAM
2674 .secondary = &ocram_clk,
2675 #else
2676 .secondary = &mmdc_ch0_axi_clk[0],
2677 #endif
2678 .flags = AHB_AUDIO_SET_POINT | CPU_FREQ_TRIG_UPDATE,
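/*
 * When SSI audio uses internal RAM for its DMA buffers
 * (CONFIG_SND_MXC_SOC_IRAM) the OCRAM clock is kept as the secondary
 * dependency; otherwise the audio data path goes through DDR, so the MMDC
 * ch0 AXI clock is held instead.  ssi2_clk and ssi3_clk below repeat the
 * same pattern.
 */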
2681 static unsigned long _clk_ssi2_get_rate(struct clk *clk)
2683 u32 reg, prediv, podf;
2685 reg = __raw_readl(MXC_CCM_CS2CDR);
2687 prediv = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK)
2688 >> MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET) + 1;
2689 podf = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK)
2690 >> MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET) + 1;
2692 return clk_get_rate(clk->parent) / (prediv * podf);
2695 static int _clk_ssi2_set_rate(struct clk *clk, unsigned long rate)
2697 u32 reg, div, pre, post;
2698 u32 parent_rate = clk_get_rate(clk->parent);
2700 div = parent_rate / rate;
2701 if (div == 0)
2702 div++;
2703 if (((parent_rate / div) != rate) || div > 512)
2704 return -EINVAL;
2706 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2708 reg = __raw_readl(MXC_CCM_CS2CDR);
2709 reg &= ~(MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK |
2710 MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK);
2711 reg |= (post - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET;
2712 reg |= (pre - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET;
2714 __raw_writel(reg, MXC_CCM_CS2CDR);
2716 return 0;
2720 static int _clk_ssi2_set_parent(struct clk *clk, struct clk *parent)
2722 u32 reg, mux;
2724 reg = __raw_readl(MXC_CCM_CSCMR1)
2725 & ~MXC_CCM_CSCMR1_SSI2_CLK_SEL_MASK;
2727 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2728 &pll4_audio_main_clk, NULL, NULL, NULL);
2729 reg |= (mux << MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET);
2731 __raw_writel(reg, MXC_CCM_CSCMR1);
2733 return 0;
2736 static struct clk ssi2_clk = {
2737 __INIT_CLK_DEBUG(ssi2_clk)
2738 .parent = &pll3_pfd_508M,
2739 .enable_reg = MXC_CCM_CCGR5,
2740 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2741 .enable = _clk_enable,
2742 .disable = _clk_disable,
2743 .set_parent = _clk_ssi2_set_parent,
2744 .set_rate = _clk_ssi2_set_rate,
2745 .round_rate = _clk_ssi_round_rate,
2746 .get_rate = _clk_ssi2_get_rate,
2747 #ifdef CONFIG_SND_MXC_SOC_IRAM
2748 .secondary = &ocram_clk,
2749 #else
2750 .secondary = &mmdc_ch0_axi_clk[0],
2751 #endif
2752 .flags = AHB_AUDIO_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2755 static unsigned long _clk_ssi3_get_rate(struct clk *clk)
2757 u32 reg, prediv, podf;
2759 reg = __raw_readl(MXC_CCM_CS1CDR);
2761 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2762 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2763 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2764 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2766 return clk_get_rate(clk->parent) / (prediv * podf);
2769 static int _clk_ssi3_set_rate(struct clk *clk, unsigned long rate)
2771 u32 reg, div, pre, post;
2772 u32 parent_rate = clk_get_rate(clk->parent);
2774 div = parent_rate / rate;
2775 if (div == 0)
2776 div++;
2777 if (((parent_rate / div) != rate) || div > 512)
2778 return -EINVAL;
2780 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2782 reg = __raw_readl(MXC_CCM_CS1CDR);
2783 reg &= ~(MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK|
2784 MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK);
2785 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET;
2786 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET;
2788 __raw_writel(reg, MXC_CCM_CS1CDR);
2790 return 0;
2794 static int _clk_ssi3_set_parent(struct clk *clk, struct clk *parent)
2796 u32 reg, mux;
2798 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_SSI3_CLK_SEL_MASK;
2800 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2801 &pll4_audio_main_clk, NULL, NULL, NULL);
2802 reg |= (mux << MXC_CCM_CSCMR1_SSI3_CLK_SEL_OFFSET);
2804 __raw_writel(reg, MXC_CCM_CSCMR1);
2806 return 0;
2809 static struct clk ssi3_clk = {
2810 __INIT_CLK_DEBUG(ssi3_clk)
2811 .parent = &pll3_pfd_508M,
2812 .enable_reg = MXC_CCM_CCGR5,
2813 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2814 .enable = _clk_enable,
2815 .disable = _clk_disable,
2816 .set_parent = _clk_ssi3_set_parent,
2817 .set_rate = _clk_ssi3_set_rate,
2818 .round_rate = _clk_ssi_round_rate,
2819 .get_rate = _clk_ssi3_get_rate,
2820 #ifdef CONFIG_SND_MXC_SOC_IRAM
2821 .secondary = &ocram_clk,
2822 #else
2823 .secondary = &mmdc_ch0_axi_clk[0],
2824 #endif
2825 .flags = AHB_AUDIO_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2828 static unsigned long _clk_ldb_di_round_rate(struct clk *clk,
2829 unsigned long rate)
2831 u32 parent_rate = clk_get_rate(clk->parent);
2833 if (rate * 7 <= parent_rate + parent_rate/20)
2834 return parent_rate / 7;
2835 else
2836 return 2 * parent_rate / 7;
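/*
 * An LVDS link serialises 7 bits per pixel clock, so the LDB DI clock is
 * either parent/7 (the LDB_DI*_IPU_DIV bit set) or 2*parent/7, an effective
 * divide by 3.5, when the bit is clear.  The "+ parent_rate/20" term gives
 * the /7 choice a 5% tolerance before falling back to the faster setting.
 */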
2839 static unsigned long _clk_ldb_di0_get_rate(struct clk *clk)
2841 u32 div;
2843 div = __raw_readl(MXC_CCM_CSCMR2) &
2844 MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2846 if (div)
2847 return clk_get_rate(clk->parent) / 7;
2849 return (2 * clk_get_rate(clk->parent)) / 7;
2852 static int _clk_ldb_di0_set_rate(struct clk *clk, unsigned long rate)
2854 u32 reg, div = 0;
2855 u32 parent_rate = clk_get_rate(clk->parent);
2857 if (rate * 7 <= parent_rate + parent_rate/20) {
2858 div = 7;
2859 rate = parent_rate / 7;
2860 } else
2861 rate = 2 * parent_rate / 7;
2863 reg = __raw_readl(MXC_CCM_CSCMR2);
2864 if (div == 7)
2865 reg |= MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2866 else
2867 reg &= ~MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2869 __raw_writel(reg, MXC_CCM_CSCMR2);
2871 return 0;
2874 static int _clk_ldb_di0_set_parent(struct clk *clk, struct clk *parent)
2876 u32 reg, mux;
2877 int rev = mx6q_revision();
2879 reg = __raw_readl(MXC_CCM_CS2CDR)
2880 & ~MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_MASK;
2882 mux = _get_mux6(parent, &pll5_video_main_clk,
2883 &pll2_pfd_352M, &pll2_pfd_400M,
2884 (rev == IMX_CHIP_REVISION_1_0) ?
2885 &pll3_pfd_540M : /* MX6Q TO1.0 */
2886 &mmdc_ch1_axi_clk[0], /* MX6Q TO1.1 and MX6DL */
2887 &pll3_usb_otg_main_clk, NULL);
2888 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_OFFSET);
2890 __raw_writel(reg, MXC_CCM_CS2CDR);
2892 return 0;
2895 static struct clk ldb_di0_clk = {
2896 __INIT_CLK_DEBUG(ldb_di0_clk)
2897 .id = 0,
2898 .parent = &pll2_pfd_352M,
2899 .enable_reg = MXC_CCM_CCGR3,
2900 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
2901 .enable = _clk_enable,
2902 .disable = _clk_disable,
2903 .set_parent = _clk_ldb_di0_set_parent,
2904 .set_rate = _clk_ldb_di0_set_rate,
2905 .round_rate = _clk_ldb_di_round_rate,
2906 .get_rate = _clk_ldb_di0_get_rate,
2907 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2910 static unsigned long _clk_ldb_di1_get_rate(struct clk *clk)
2912 u32 div;
2914 div = __raw_readl(MXC_CCM_CSCMR2) &
2915 MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2917 if (div)
2918 return clk_get_rate(clk->parent) / 7;
2920 return (2 * clk_get_rate(clk->parent)) / 7;
2923 static int _clk_ldb_di1_set_rate(struct clk *clk, unsigned long rate)
2925 u32 reg, div = 0;
2926 u32 parent_rate = clk_get_rate(clk->parent);
2928 if (rate * 7 <= parent_rate + parent_rate/20) {
2929 div = 7;
2930 rate = parent_rate / 7;
2931 } else
2932 rate = 2 * parent_rate / 7;
2934 reg = __raw_readl(MXC_CCM_CSCMR2);
2935 if (div == 7)
2936 reg |= MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2937 else
2938 reg &= ~MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2940 __raw_writel(reg, MXC_CCM_CSCMR2);
2942 return 0;
2945 static int _clk_ldb_di1_set_parent(struct clk *clk, struct clk *parent)
2947 u32 reg, mux;
2948 int rev = mx6q_revision();
2950 reg = __raw_readl(MXC_CCM_CS2CDR)
2951 & ~MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_MASK;
2953 mux = _get_mux6(parent, &pll5_video_main_clk,
2954 &pll2_pfd_352M, &pll2_pfd_400M,
2955 (rev == IMX_CHIP_REVISION_1_0) ?
2956 &pll3_pfd_540M : /* MX6Q TO1.0 */
2957 &mmdc_ch1_axi_clk[0], /* MX6Q TO1.1 and MX6DL */
2958 &pll3_usb_otg_main_clk, NULL);
2959 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_OFFSET);
2961 __raw_writel(reg, MXC_CCM_CS2CDR);
2963 return 0;
2966 static struct clk ldb_di1_clk = {
2967 __INIT_CLK_DEBUG(ldb_di1_clk)
2968 .id = 0,
2969 .parent = &pll2_pfd_352M,
2970 .enable_reg = MXC_CCM_CCGR3,
2971 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2972 .enable = _clk_enable,
2973 .disable = _clk_disable,
2974 .set_parent = _clk_ldb_di1_set_parent,
2975 .set_rate = _clk_ldb_di1_set_rate,
2976 .round_rate = _clk_ldb_di_round_rate,
2977 .get_rate = _clk_ldb_di1_get_rate,
2978 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2982 static unsigned long _clk_ipu_di_round_rate(struct clk *clk,
2983 unsigned long rate)
2985 u32 div;
2986 u32 parent_rate = clk_get_rate(clk->parent);
2988 if ((clk->parent == &ldb_di0_clk) ||
2989 (clk->parent == &ldb_di1_clk))
2990 return parent_rate;
2992 div = parent_rate / rate;
2993 /* Round to closest divisor */
2994 if ((parent_rate % rate) > (rate / 2))
2995 div++;
2997 /* Make sure rate is not greater than the maximum value for the clock.
2998 * Also prevent a div of 0.
2999 */
3000 if (div == 0)
3001 div++;
3003 if (div > 8)
3004 div = 8;
3006 return parent_rate / div;
3009 static unsigned long _clk_ipu1_di0_get_rate(struct clk *clk)
3011 u32 reg, div;
3013 if ((clk->parent == &ldb_di0_clk) ||
3014 (clk->parent == &ldb_di1_clk))
3015 return clk_get_rate(clk->parent);
3017 reg = __raw_readl(MXC_CCM_CHSCCDR);
3019 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK) >>
3020 MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET) + 1;
3022 return clk_get_rate(clk->parent) / div;
3025 static int _clk_ipu1_di0_set_rate(struct clk *clk, unsigned long rate)
3027 u32 reg, div;
3028 u32 parent_rate = clk_get_rate(clk->parent);
3030 if ((clk->parent == &ldb_di0_clk) ||
3031 (clk->parent == &ldb_di1_clk)) {
3032 if (parent_rate == rate)
3033 return 0;
3034 else
3035 return -EINVAL;
3038 div = parent_rate / rate;
3039 if (div == 0)
3040 div++;
3041 if (((parent_rate / div) != rate) || (div > 8))
3042 return -EINVAL;
3044 reg = __raw_readl(MXC_CCM_CHSCCDR);
3045 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK;
3046 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET;
3047 __raw_writel(reg, MXC_CCM_CHSCCDR);
3049 return 0;
3053 static int _clk_ipu1_di0_set_parent(struct clk *clk, struct clk *parent)
3055 u32 reg, mux;
3057 if (parent == &ldb_di0_clk)
3058 mux = 0x3;
3059 else if (parent == &ldb_di1_clk)
3060 mux = 0x4;
3061 else {
3062 reg = __raw_readl(MXC_CCM_CHSCCDR)
3063 & ~MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_MASK;
3065 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3066 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
3067 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
3068 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_OFFSET);
3070 __raw_writel(reg, MXC_CCM_CHSCCDR);
3072 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
3073 mux = 0;
3076 reg = __raw_readl(MXC_CCM_CHSCCDR)
3077 & ~MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_MASK;
3078 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_OFFSET),
3079 MXC_CCM_CHSCCDR);
3081 return 0;
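/*
 * The DI clock is muxed in two stages: for non-LDB parents the code above
 * programs the 6-way pre-mux and leaves the final CHSCCDR selector at 0, so
 * the DI runs from the divided pre-muxed clock; selector values 3 and 4
 * instead tap ldb_di0/ldb_di1 directly, bypassing the DI PODF, which is why
 * the rate callbacks above pass an LDB parent's rate through unchanged.
 * The ipu1_di1 and ipu2_di* callbacks below follow the same scheme.
 */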
3084 static unsigned long _clk_ipu1_di1_get_rate(struct clk *clk)
3086 u32 reg, div;
3088 if ((clk->parent == &ldb_di0_clk) ||
3089 (clk->parent == &ldb_di1_clk))
3090 return clk_get_rate(clk->parent);
3092 reg = __raw_readl(MXC_CCM_CHSCCDR);
3094 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK)
3095 >> MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET) + 1;
3097 return clk_get_rate(clk->parent) / div;
3100 static int _clk_ipu1_di1_set_rate(struct clk *clk, unsigned long rate)
3102 u32 reg, div;
3103 u32 parent_rate = clk_get_rate(clk->parent);
3105 if ((clk->parent == &ldb_di0_clk) ||
3106 (clk->parent == &ldb_di1_clk)) {
3107 if (parent_rate == rate)
3108 return 0;
3109 else
3110 return -EINVAL;
3113 div = parent_rate / rate;
3114 if (div == 0)
3115 div++;
3116 if (((parent_rate / div) != rate) || (div > 8))
3117 return -EINVAL;
3119 reg = __raw_readl(MXC_CCM_CHSCCDR);
3120 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK;
3121 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET;
3122 __raw_writel(reg, MXC_CCM_CHSCCDR);
3124 return 0;
3128 static int _clk_ipu1_di1_set_parent(struct clk *clk, struct clk *parent)
3130 u32 reg, mux;
3132 if (parent == &ldb_di0_clk)
3133 mux = 0x3;
3134 else if (parent == &ldb_di1_clk)
3135 mux = 0x4;
3136 else {
3137 reg = __raw_readl(MXC_CCM_CHSCCDR)
3138 & ~MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_MASK;
3140 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3141 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
3142 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
3143 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_OFFSET);
3145 __raw_writel(reg, MXC_CCM_CHSCCDR);
3147 /* Derive clock from divided pre-muxed ipu1_di1 clock. */
3148 mux = 0;
3150 reg = __raw_readl(MXC_CCM_CHSCCDR)
3151 & ~MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_MASK;
3152 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_OFFSET),
3153 MXC_CCM_CHSCCDR);
3155 return 0;
3158 static struct clk ipu1_di_clk[] = {
3160 __INIT_CLK_DEBUG(ipu1_di_clk_0)
3161 .id = 0,
3162 .parent = &pll5_video_main_clk,
3163 .enable_reg = MXC_CCM_CCGR3,
3164 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
3165 .enable = _clk_enable,
3166 .disable = _clk_disable,
3167 .set_parent = _clk_ipu1_di0_set_parent,
3168 .set_rate = _clk_ipu1_di0_set_rate,
3169 .round_rate = _clk_ipu_di_round_rate,
3170 .get_rate = _clk_ipu1_di0_get_rate,
3171 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3174 __INIT_CLK_DEBUG(ipu1_di_clk_1)
3175 .id = 1,
3176 .parent = &pll5_video_main_clk,
3177 .enable_reg = MXC_CCM_CCGR3,
3178 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3179 .enable = _clk_enable,
3180 .disable = _clk_disable,
3181 .set_parent = _clk_ipu1_di1_set_parent,
3182 .set_rate = _clk_ipu1_di1_set_rate,
3183 .round_rate = _clk_ipu_di_round_rate,
3184 .get_rate = _clk_ipu1_di1_get_rate,
3185 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3189 static unsigned long _clk_ipu2_di0_get_rate(struct clk *clk)
3191 u32 reg, div;
3193 if ((clk->parent == &ldb_di0_clk) ||
3194 (clk->parent == &ldb_di1_clk))
3195 return clk_get_rate(clk->parent);
3197 reg = __raw_readl(MXC_CCM_CSCDR2);
3199 div = ((reg & MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK) >>
3200 MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET) + 1;
3202 return clk_get_rate(clk->parent) / div;
3205 static int _clk_ipu2_di0_set_rate(struct clk *clk, unsigned long rate)
3207 u32 reg, div;
3208 u32 parent_rate = clk_get_rate(clk->parent);
3210 if ((clk->parent == &ldb_di0_clk) ||
3211 (clk->parent == &ldb_di1_clk)) {
3212 if (parent_rate == rate)
3213 return 0;
3214 else
3215 return -EINVAL;
3218 div = parent_rate / rate;
3219 if (div == 0)
3220 div++;
3221 if (((parent_rate / div) != rate) || (div > 8))
3222 return -EINVAL;
3224 reg = __raw_readl(MXC_CCM_CSCDR2);
3225 reg &= ~MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK;
3226 reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET;
3227 __raw_writel(reg, MXC_CCM_CSCDR2);
3229 return 0;
3232 static int _clk_ipu2_di0_set_parent(struct clk *clk, struct clk *parent)
3234 u32 reg, mux;
3236 if (parent == &ldb_di0_clk)
3237 mux = 0x3;
3238 else if (parent == &ldb_di1_clk)
3239 mux = 0x4;
3240 else {
3241 reg = __raw_readl(MXC_CCM_CSCDR2)
3242 & ~MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_MASK;
3244 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3245 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
3246 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
3247 reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_OFFSET);
3249 __raw_writel(reg, MXC_CCM_CSCDR2);
3251 /* Derive clock from divided pre-muxed ipu2_di0 clock.*/
3252 mux = 0;
3254 reg = __raw_readl(MXC_CCM_CSCDR2)
3255 & ~MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_MASK;
3256 __raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_OFFSET),
3257 MXC_CCM_CSCDR2);
3259 return 0;
3262 static unsigned long _clk_ipu2_di1_get_rate(struct clk *clk)
3264 u32 reg, div;
3266 if ((clk->parent == &ldb_di0_clk) ||
3267 (clk->parent == &ldb_di1_clk))
3268 return clk_get_rate(clk->parent);
3270 reg = __raw_readl(MXC_CCM_CSCDR2);
3272 div = ((reg & MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK)
3273 >> MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET) + 1;
3275 return clk_get_rate(clk->parent) / div;
3278 static int _clk_ipu2_di1_set_rate(struct clk *clk, unsigned long rate)
3280 u32 reg, div;
3281 u32 parent_rate = clk_get_rate(clk->parent);
3283 if ((clk->parent == &ldb_di0_clk) ||
3284 (clk->parent == &ldb_di1_clk)) {
3285 if (parent_rate == rate)
3286 return 0;
3287 else
3288 return -EINVAL;
3291 div = parent_rate / rate;
3292 if (div == 0)
3293 div++;
3294 if (((parent_rate / div) != rate) || (div > 8))
3295 return -EINVAL;
3297 reg = __raw_readl(MXC_CCM_CSCDR2);
3298 reg &= ~MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK;
3299 reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET;
3300 __raw_writel(reg, MXC_CCM_CSCDR2);
3302 return 0;
3305 static int _clk_ipu2_di1_set_parent(struct clk *clk, struct clk *parent)
3307 u32 reg, mux;
3309 if (parent == &ldb_di0_clk)
3310 mux = 0x3;
3311 else if (parent == &ldb_di1_clk)
3312 mux = 0x4;
3313 else {
3314 reg = __raw_readl(MXC_CCM_CSCDR2)
3315 & ~MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_MASK;
3317 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3318 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
3319 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
3320 reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_OFFSET);
3322 __raw_writel(reg, MXC_CCM_CSCDR2);
3324 /* Derive clock from divided pre-muxed ipu2_di1 clock. */
3325 mux = 0;
3327 reg = __raw_readl(MXC_CCM_CSCDR2)
3328 & ~MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_MASK;
3329 __raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_OFFSET),
3330 MXC_CCM_CSCDR2);
3332 return 0;
3335 static struct clk ipu2_di_clk[] = {
3337 __INIT_CLK_DEBUG(ipu2_di_clk_0)
3338 .id = 0,
3339 .parent = &pll5_video_main_clk,
3340 .enable_reg = MXC_CCM_CCGR3,
3341 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3342 .enable = _clk_enable,
3343 .disable = _clk_disable,
3344 .set_parent = _clk_ipu2_di0_set_parent,
3345 .set_rate = _clk_ipu2_di0_set_rate,
3346 .round_rate = _clk_ipu_di_round_rate,
3347 .get_rate = _clk_ipu2_di0_get_rate,
3348 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3351 __INIT_CLK_DEBUG(ipu2_di_clk_1)
3352 .id = 1,
3353 .parent = &pll5_video_main_clk,
3354 .enable_reg = MXC_CCM_CCGR3,
3355 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3356 .enable = _clk_enable,
3357 .disable = _clk_disable,
3358 .set_parent = _clk_ipu2_di1_set_parent,
3359 .set_rate = _clk_ipu2_di1_set_rate,
3360 .round_rate = _clk_ipu_di_round_rate,
3361 .get_rate = _clk_ipu2_di1_get_rate,
3362 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3366 static unsigned long _clk_can_root_round_rate(struct clk *clk,
3367 unsigned long rate)
3369 u32 div;
3370 u32 parent_rate = clk_get_rate(clk->parent);
3372 div = parent_rate / rate;
3374 /* Make sure rate is not greater than the maximum value for the clock.
3375 * Also prevent a div of 0.
3376 */
3377 if (div == 0)
3378 div++;
3380 if (div > 64)
3381 div = 64;
3383 return parent_rate / div;
3386 static int _clk_can_root_set_rate(struct clk *clk, unsigned long rate)
3388 u32 reg, div;
3389 u32 parent_rate = clk_get_rate(clk->parent);
3391 div = parent_rate / rate;
3392 if (div == 0)
3393 div++;
3394 if (((parent_rate / div) != rate) || (div > 64))
3395 return -EINVAL;
3397 reg = __raw_readl(MXC_CCM_CSCMR2) & ~MXC_CCM_CSCMR2_CAN_CLK_PODF_MASK;
3398 reg |= ((div - 1) << MXC_CCM_CSCMR2_CAN_CLK_PODF_OFFSET);
3400 __raw_writel(reg, MXC_CCM_CSCMR2);
3402 return 0;
3405 static unsigned long _clk_can_root_get_rate(struct clk *clk)
3407 u32 reg, div;
3408 unsigned long val;
3410 reg = __raw_readl(MXC_CCM_CSCMR2) & MXC_CCM_CSCMR2_CAN_CLK_PODF_MASK;
3411 div = (reg >> MXC_CCM_CSCMR2_CAN_CLK_PODF_OFFSET) + 1;
3412 val = clk_get_rate(clk->parent) / div;
3414 return val;
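/*
 * The CAN root clock is derived from the fixed 60 MHz pll3_60M tap through
 * a 6-bit PODF (divide by 1..64).  For example, a divide-by-2 (PODF field
 * written as 1) yields the 30 MHz root that the FlexCAN module and serial
 * gates below then run from.
 */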
3417 static struct clk can_clk_root = {
3418 __INIT_CLK_DEBUG(can_clk_root)
3419 .parent = &pll3_60M,
3420 .set_rate = _clk_can_root_set_rate,
3421 .get_rate = _clk_can_root_get_rate,
3422 .round_rate = _clk_can_root_round_rate,
3425 static struct clk can2_clk[] = {
3427 __INIT_CLK_DEBUG(can2_module_clk)
3428 .id = 0,
3429 .parent = &can_clk_root,
3430 .enable_reg = MXC_CCM_CCGR0,
3431 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
3432 .enable = _clk_enable,
3433 .disable = _clk_disable,
3434 .secondary = &can2_clk[1],
3435 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3438 __INIT_CLK_DEBUG(can2_serial_clk)
3439 .id = 1,
3440 .parent = &can_clk_root,
3441 .enable_reg = MXC_CCM_CCGR0,
3442 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
3443 .enable = _clk_enable,
3444 .disable = _clk_disable,
3449 static struct clk can1_clk[] = {
3451 __INIT_CLK_DEBUG(can1_module_clk)
3452 .id = 0,
3453 .parent = &can_clk_root,
3454 .enable_reg = MXC_CCM_CCGR0,
3455 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
3456 .enable = _clk_enable,
3457 .disable = _clk_disable,
3458 .secondary = &can1_clk[1],
3459 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3462 __INIT_CLK_DEBUG(can1_serial_clk)
3463 .id = 1,
3464 .parent = &can_clk_root,
3465 .enable_reg = MXC_CCM_CCGR0,
3466 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3467 .enable = _clk_enable,
3468 .disable = _clk_disable,
3472 static unsigned long _clk_spdif_round_rate(struct clk *clk,
3473 unsigned long rate)
3475 u32 pre, post;
3476 u32 parent_rate = clk_get_rate(clk->parent);
3477 u32 div = parent_rate / rate;
3479 if (parent_rate % rate)
3480 div++;
3482 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3484 return parent_rate / (pre * post);
3487 static int _clk_spdif0_set_parent(struct clk *clk, struct clk *parent)
3489 u32 reg, mux;
3491 reg = __raw_readl(MXC_CCM_CDCDR)
3492 & ~MXC_CCM_CDCDR_SPDIF0_CLK_SEL_MASK;
3494 mux = _get_mux6(parent, &pll4_audio_main_clk,
3495 &pll3_pfd_508M, &pll3_pfd_454M,
3496 &pll3_sw_clk, NULL, NULL);
3497 reg |= mux << MXC_CCM_CDCDR_SPDIF0_CLK_SEL_OFFSET;
3499 __raw_writel(reg, MXC_CCM_CDCDR);
3501 return 0;
3504 static unsigned long _clk_spdif0_get_rate(struct clk *clk)
3506 u32 reg, pred, podf;
3508 reg = __raw_readl(MXC_CCM_CDCDR);
3510 pred = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK)
3511 >> MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET) + 1;
3512 podf = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK)
3513 >> MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET) + 1;
3515 return clk_get_rate(clk->parent) / (pred * podf);
3518 static int _clk_spdif0_set_rate(struct clk *clk, unsigned long rate)
3520 u32 reg, div, pre, post;
3521 u32 parent_rate = clk_get_rate(clk->parent);
3523 div = parent_rate / rate;
3524 if (div == 0)
3525 div++;
3526 if (((parent_rate / div) != rate) || div > 64)
3527 return -EINVAL;
3529 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3531 reg = __raw_readl(MXC_CCM_CDCDR);
3532 reg &= ~(MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK|
3533 MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK);
3534 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET;
3535 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET;
3537 __raw_writel(reg, MXC_CCM_CDCDR);
3539 return 0;
3542 static struct clk spdif0_clk[] = {
3544 __INIT_CLK_DEBUG(spdif0_clk_0)
3545 .id = 0,
3546 .parent = &pll3_sw_clk,
3547 .enable = _clk_enable,
3548 .enable_reg = MXC_CCM_CCGR5,
3549 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
3550 .disable = _clk_disable,
3551 .secondary = &spdif0_clk[1],
3552 .set_rate = _clk_spdif0_set_rate,
3553 .get_rate = _clk_spdif0_get_rate,
3554 .set_parent = _clk_spdif0_set_parent,
3555 .round_rate = _clk_spdif_round_rate,
3556 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3559 __INIT_CLK_DEBUG(spdif0_clk_1)
3560 .id = 1,
3561 .parent = &ipg_clk,
3562 .secondary = &spba_clk,
3566 static unsigned long _clk_esai_round_rate(struct clk *clk,
3567 unsigned long rate)
3569 u32 pre, post;
3570 u32 parent_rate = clk_get_rate(clk->parent);
3571 u32 div = parent_rate / rate;
3573 if (parent_rate % rate)
3574 div++;
3576 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3578 return parent_rate / (pre * post);
3581 static int _clk_esai_set_parent(struct clk *clk, struct clk *parent)
3583 u32 reg, mux;
3585 reg = __raw_readl(MXC_CCM_CSCMR2) & ~MXC_CCM_CSCMR2_ESAI_CLK_SEL_MASK;
3587 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
3588 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
3589 reg |= mux << MXC_CCM_CSCMR2_ESAI_CLK_SEL_OFFSET;
3591 __raw_writel(reg, MXC_CCM_CSCMR2);
3593 return 0;
3596 static unsigned long _clk_esai_get_rate(struct clk *clk)
3598 u32 reg, pred, podf;
3600 reg = __raw_readl(MXC_CCM_CS1CDR);
3602 pred = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK)
3603 >> MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET) + 1;
3604 podf = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK)
3605 >> MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET) + 1;
3607 return clk_get_rate(clk->parent) / (pred * podf);
3610 static int _clk_esai_set_rate(struct clk *clk, unsigned long rate)
3612 u32 reg, div, pre, post;
3613 u32 parent_rate = clk_get_rate(clk->parent);
3615 div = parent_rate / rate;
3616 if (div == 0)
3617 div++;
3618 if (((parent_rate / div) != rate) || div > 64)
3619 return -EINVAL;
3621 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3623 reg = __raw_readl(MXC_CCM_CS1CDR);
3624 reg &= ~(MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK|
3625 MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK);
3626 reg |= (post - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET;
3627 reg |= (pre - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET;
3629 __raw_writel(reg, MXC_CCM_CS1CDR);
3631 return 0;
3634 static struct clk esai_clk = {
3635 __INIT_CLK_DEBUG(esai_clk)
3636 .id = 0,
3637 .parent = &pll3_sw_clk,
3638 .secondary = &spba_clk,
3639 .enable_reg = MXC_CCM_CCGR1,
3640 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3641 .enable = _clk_enable,
3642 .disable = _clk_disable,
3643 .set_rate = _clk_esai_set_rate,
3644 .get_rate = _clk_esai_get_rate,
3645 .set_parent = _clk_esai_set_parent,
3646 .round_rate = _clk_esai_round_rate,
3649 static int _clk_enet_set_rate(struct clk *clk, unsigned long rate)
3651 unsigned int reg, div = 1;
3653 switch (rate) {
3654 case 25000000:
3655 div = 0;
3656 break;
3657 case 50000000:
3658 div = 1;
3659 break;
3660 case 100000000:
3661 div = 2;
3662 break;
3663 case 125000000:
3664 div = 3;
3665 break;
3666 default:
3667 return -EINVAL;
3669 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3670 reg &= ~ANADIG_PLL_ENET_DIV_SELECT_MASK;
3671 reg |= (div << ANADIG_PLL_ENET_DIV_SELECT_OFFSET);
3672 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
3674 return 0;
3677 static unsigned long _clk_enet_get_rate(struct clk *clk)
3679 unsigned int div;
3681 div = (__raw_readl(PLL8_ENET_BASE_ADDR))
3682 & ANADIG_PLL_ENET_DIV_SELECT_MASK;
3684 switch (div) {
3685 case 0:
3686 div = 20;
3687 break;
3688 case 1:
3689 div = 10;
3690 break;
3691 case 3:
3692 div = 5;
3693 break;
3694 case 4:
3695 div = 4;
3696 break;
3699 return 500000000 / div;
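/*
 * PLL8 (the ENET PLL) nominally runs at 500 MHz; the ANADIG DIV_SELECT
 * field programmed above reduces that to the 25/50/100/125 MHz reference
 * clock the Ethernet PHY interface expects, which is why only those four
 * rates are accepted by _clk_enet_set_rate().
 */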
3702 static struct clk enet_clk[] = {
3704 __INIT_CLK_DEBUG(enet_clk)
3705 .id = 0,
3706 .parent = &pll8_enet_main_clk,
3707 .enable_reg = MXC_CCM_CCGR1,
3708 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3709 .enable = _clk_enable,
3710 .disable = _clk_disable,
3711 .set_rate = _clk_enet_set_rate,
3712 .get_rate = _clk_enet_get_rate,
3713 .secondary = &enet_clk[1],
3714 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3717 .parent = &mmdc_ch0_axi_clk[0],
3718 .secondary = &mx6per1_clk,
3722 static struct clk ecspi_clk[] = {
3724 __INIT_CLK_DEBUG(ecspi0_clk)
3725 .id = 0,
3726 .parent = &pll3_60M,
3727 .secondary = &spba_clk,
3728 .enable_reg = MXC_CCM_CCGR1,
3729 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3730 .enable = _clk_enable,
3731 .disable = _clk_disable,
3734 __INIT_CLK_DEBUG(ecspi1_clk)
3735 .id = 1,
3736 .parent = &pll3_60M,
3737 .secondary = &spba_clk,
3738 .enable_reg = MXC_CCM_CCGR1,
3739 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
3740 .enable = _clk_enable,
3741 .disable = _clk_disable,
3744 __INIT_CLK_DEBUG(ecspi2_clk)
3745 .id = 2,
3746 .parent = &pll3_60M,
3747 .secondary = &spba_clk,
3748 .enable_reg = MXC_CCM_CCGR1,
3749 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3750 .enable = _clk_enable,
3751 .disable = _clk_disable,
3754 __INIT_CLK_DEBUG(ecspi3_clk)
3755 .id = 3,
3756 .parent = &pll3_60M,
3757 .secondary = &spba_clk,
3758 .enable_reg = MXC_CCM_CCGR1,
3759 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
3760 .enable = _clk_enable,
3761 .disable = _clk_disable,
3764 __INIT_CLK_DEBUG(ecspi4_clk)
3765 .id = 4,
3766 .parent = &pll3_60M,
3767 .secondary = &spba_clk,
3768 .enable_reg = MXC_CCM_CCGR1,
3769 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3770 .enable = _clk_enable,
3771 .disable = _clk_disable,
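/*
 * All five ECSPI instances are plain gates off the fixed 60 MHz pll3_60M
 * tap; no set_rate/round_rate callbacks are needed here because the ECSPI
 * block generates its SCLK with its own internal pre/post dividers.
 */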
3775 static unsigned long _clk_emi_slow_round_rate(struct clk *clk,
3776 unsigned long rate)
3778 u32 div;
3779 u32 parent_rate = clk_get_rate(clk->parent);
3781 div = parent_rate / rate;
3783 /* Make sure rate is not greater than the maximum value for the clock.
3784 * Also prevent a div of 0.
3785 */
3786 if (div == 0)
3787 div++;
3789 if (div > 8)
3790 div = 8;
3792 return parent_rate / div;
3795 static int _clk_emi_slow_set_parent(struct clk *clk, struct clk *parent)
3797 int mux;
3798 u32 reg = __raw_readl(MXC_CCM_CSCMR1)
3799 & ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;
3801 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3802 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3803 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET);
3804 __raw_writel(reg, MXC_CCM_CSCMR1);
3806 return 0;
3809 static unsigned long _clk_emi_slow_get_rate(struct clk *clk)
3811 u32 reg, div;
3813 reg = __raw_readl(MXC_CCM_CSCMR1);
3814 div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK) >>
3815 MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET) + 1;
3817 return clk_get_rate(clk->parent) / div;
3820 static int _clk_emi_slow_set_rate(struct clk *clk, unsigned long rate)
3822 u32 reg, div;
3823 u32 parent_rate = clk_get_rate(clk->parent);
3825 div = parent_rate / rate;
3826 if (div == 0)
3827 div++;
3828 if (((parent_rate / div) != rate) || (div > 8))
3829 return -EINVAL;
3831 reg = __raw_readl(MXC_CCM_CSCMR1);
3832 reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
3833 reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
3834 __raw_writel(reg, MXC_CCM_CSCMR1);
3836 return 0;
3839 static struct clk emi_slow_clk = {
3840 __INIT_CLK_DEBUG(emi_slow_clk)
3841 .id = 0,
3842 .parent = &axi_clk,
3843 .enable_reg = MXC_CCM_CCGR6,
3844 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3845 .enable = _clk_enable,
3846 .disable = _clk_disable,
3847 .set_rate = _clk_emi_slow_set_rate,
3848 .get_rate = _clk_emi_slow_get_rate,
3849 .round_rate = _clk_emi_slow_round_rate,
3850 .set_parent = _clk_emi_slow_set_parent,
3853 static unsigned long _clk_emi_round_rate(struct clk *clk,
3854 unsigned long rate)
3856 u32 div;
3857 u32 parent_rate = clk_get_rate(clk->parent);
3859 div = parent_rate / rate;
3861 /* Make sure rate is not greater than the maximum value for the clock.
3862 * Also prevent a div of 0.
3863 */
3864 if (div == 0)
3865 div++;
3867 if (div > 8)
3868 div = 8;
3870 return parent_rate / div;
3873 static int _clk_emi_set_parent(struct clk *clk, struct clk *parent)
3875 int mux;
3876 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_ACLK_EMI_MASK;
3878 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3879 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3880 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_OFFSET);
3881 __raw_writel(reg, MXC_CCM_CSCMR1);
3883 return 0;
3886 static unsigned long _clk_emi_get_rate(struct clk *clk)
3888 u32 reg, div;
3890 reg = __raw_readl(MXC_CCM_CSCMR1);
3891 div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK) >>
3892 MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET) + 1;
3894 return clk_get_rate(clk->parent) / div;
3897 static int _clk_emi_set_rate(struct clk *clk, unsigned long rate)
3899 u32 reg, div;
3900 u32 parent_rate = clk_get_rate(clk->parent);
3902 div = parent_rate / rate;
3903 if (div == 0)
3904 div++;
3905 if (((parent_rate / div) != rate) || (div > 8))
3906 return -EINVAL;
3908 reg = __raw_readl(MXC_CCM_CSCMR1);
3909 reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK;
3910 reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
3911 __raw_writel(reg, MXC_CCM_CSCMR1);
3913 return 0;
3916 static struct clk emi_clk = {
3917 __INIT_CLK_DEBUG(emi_clk)
3918 .id = 0,
3919 .parent = &axi_clk,
3920 .set_rate = _clk_emi_set_rate,
3921 .get_rate = _clk_emi_get_rate,
3922 .round_rate = _clk_emi_round_rate,
3923 .set_parent = _clk_emi_set_parent,
3926 static unsigned long _clk_enfc_round_rate(struct clk *clk,
3927 unsigned long rate)
3929 u32 pre, post;
3930 u32 parent_rate = clk_get_rate(clk->parent);
3931 u32 div = parent_rate / rate;
3933 if (parent_rate % rate)
3934 div++;
3936 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
3938 return parent_rate / (pre * post);
3941 static int _clk_enfc_set_parent(struct clk *clk, struct clk *parent)
3943 u32 reg, mux;
3945 reg = __raw_readl(MXC_CCM_CS2CDR)
3946 & ~MXC_CCM_CS2CDR_ENFC_CLK_SEL_MASK;
3948 mux = _get_mux6(parent, &pll2_pfd_352M,
3949 &pll2_528_bus_main_clk, &pll3_usb_otg_main_clk,
3950 &pll2_pfd_400M, NULL, NULL);
3951 reg |= mux << MXC_CCM_CS2CDR_ENFC_CLK_SEL_OFFSET;
3953 __raw_writel(reg, MXC_CCM_CS2CDR);
3955 return 0;
3958 static unsigned long _clk_enfc_get_rate(struct clk *clk)
3960 u32 reg, pred, podf;
3962 reg = __raw_readl(MXC_CCM_CS2CDR);
3964 pred = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK)
3965 >> MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET) + 1;
3966 podf = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK)
3967 >> MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET) + 1;
3969 return clk_get_rate(clk->parent) / (pred * podf);
3972 static int _clk_enfc_set_rate(struct clk *clk, unsigned long rate)
3974 u32 reg, div, pre, post;
3975 u32 parent_rate = clk_get_rate(clk->parent);
3977 div = parent_rate / rate;
3978 if (div == 0)
3979 div++;
3980 if (((parent_rate / div) != rate) || div > 512)
3981 return -EINVAL;
3983 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
3985 reg = __raw_readl(MXC_CCM_CS2CDR);
3986 reg &= ~(MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK|
3987 MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK);
3988 reg |= (post - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET;
3989 reg |= (pre - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET;
3991 __raw_writel(reg, MXC_CCM_CS2CDR);
3993 return 0;
3996 static struct clk enfc_clk = {
3997 __INIT_CLK_DEBUG(enfc_clk)
3998 .id = 0,
3999 .parent = &pll2_pfd_352M,
4000 .enable_reg = MXC_CCM_CCGR2,
4001 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
4002 .enable = _clk_enable,
4003 .disable = _clk_disable,
4004 .set_rate = _clk_enfc_set_rate,
4005 .get_rate = _clk_enfc_get_rate,
4006 .round_rate = _clk_enfc_round_rate,
4007 .set_parent = _clk_enfc_set_parent,
4010 static unsigned long _clk_uart_round_rate(struct clk *clk,
4011 unsigned long rate)
4013 u32 div;
4014 u32 parent_rate = clk_get_rate(clk->parent);
4016 div = parent_rate / rate;
4018 /* Make sure rate is not greater than the maximum value for the clock.
4019 * Also prevent a div of 0.
4020 */
4021 if (div == 0)
4022 div++;
4024 if (div > 64)
4025 div = 64;
4027 return parent_rate / div;
4030 static int _clk_uart_set_rate(struct clk *clk, unsigned long rate)
4032 u32 reg, div;
4033 u32 parent_rate = clk_get_rate(clk->parent);
4035 div = parent_rate / rate;
4036 if (div == 0)
4037 div++;
4038 if (((parent_rate / div) != rate) || (div > 64))
4039 return -EINVAL;
4041 reg = __raw_readl(MXC_CCM_CSCDR1) & ~MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
4042 reg |= ((div - 1) << MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET);
4044 __raw_writel(reg, MXC_CCM_CSCDR1);
4046 return 0;
4049 static unsigned long _clk_uart_get_rate(struct clk *clk)
4051 u32 reg, div;
4052 unsigned long val;
4054 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
4055 div = (reg >> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET) + 1;
4056 val = clk_get_rate(clk->parent) / div;
4058 return val;
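/*
 * The UART root clock is the 80 MHz pll3_80M tap divided by a 6-bit PODF
 * (1..64); the UART blocks derive individual baud rates from that root with
 * their own internal divisors, so a typical setup simply leaves the PODF at
 * divide-by-1.
 */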
4061 static struct clk uart_clk[] = {
4063 __INIT_CLK_DEBUG(uart_clk)
4064 .id = 0,
4065 .parent = &pll3_80M,
4066 .enable_reg = MXC_CCM_CCGR5,
4067 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
4068 .enable = _clk_enable,
4069 .disable = _clk_disable,
4070 .secondary = &uart_clk[1],
4071 .set_rate = _clk_uart_set_rate,
4072 .get_rate = _clk_uart_get_rate,
4073 .round_rate = _clk_uart_round_rate,
4076 __INIT_CLK_DEBUG(uart_serial_clk)
4077 .id = 1,
4078 .enable_reg = MXC_CCM_CCGR5,
4079 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
4080 .enable = _clk_enable,
4081 .disable = _clk_disable,
4085 static unsigned long _clk_hsi_tx_round_rate(struct clk *clk,
4086 unsigned long rate)
4088 u32 div;
4089 u32 parent_rate = clk_get_rate(clk->parent);
4091 div = parent_rate / rate;
4093 /* Make sure rate is not greater than the maximum value for the clock.
4094 * Also prevent a div of 0.
4095 */
4096 if (div == 0)
4097 div++;
4099 if (div > 8)
4100 div = 8;
4102 return parent_rate / div;
4105 static int _clk_hsi_tx_set_parent(struct clk *clk, struct clk *parent)
4107 u32 reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_HSI_TX_CLK_SEL;
4109 if (parent == &pll2_pfd_400M)
4110 reg |= (MXC_CCM_CDCDR_HSI_TX_CLK_SEL);
4112 __raw_writel(reg, MXC_CCM_CDCDR);
4114 return 0;
4117 static unsigned long _clk_hsi_tx_get_rate(struct clk *clk)
4119 u32 reg, div;
4121 reg = __raw_readl(MXC_CCM_CDCDR);
4122 div = ((reg & MXC_CCM_CDCDR_HSI_TX_PODF_MASK) >>
4123 MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET) + 1;
4125 return clk_get_rate(clk->parent) / div;
4128 static int _clk_hsi_tx_set_rate(struct clk *clk, unsigned long rate)
4130 u32 reg, div;
4131 u32 parent_rate = clk_get_rate(clk->parent);
4133 div = parent_rate / rate;
4134 if (div == 0)
4135 div++;
4136 if (((parent_rate / div) != rate) || (div > 8))
4137 return -EINVAL;
4139 reg = __raw_readl(MXC_CCM_CDCDR);
4140 reg &= ~MXC_CCM_CDCDR_HSI_TX_PODF_MASK;
4141 reg |= (div - 1) << MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET;
4142 __raw_writel(reg, MXC_CCM_CDCDR);
4144 return 0;
4147 static struct clk hsi_tx_clk[] = {
4149 __INIT_CLK_DEBUG(hsi_tx_clk)
4150 .id = 0,
4151 .parent = &pll2_pfd_400M,
4152 .enable_reg = MXC_CCM_CCGR3,
4153 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
4154 .enable = _clk_enable,
4155 .disable = _clk_disable,
4156 .set_parent = _clk_hsi_tx_set_parent,
4157 .round_rate = _clk_hsi_tx_round_rate,
4158 .set_rate = _clk_hsi_tx_set_rate,
4159 .get_rate = _clk_hsi_tx_get_rate,
4160 .secondary = &hsi_tx_clk[1],
4161 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4164 .parent = &mx6per1_clk,
4165 .secondary = &mx6per2_clk,
4169 static struct clk mipi_pllref_clk = {
4170 __INIT_CLK_DEBUG(mipi_pllref_clk)
4171 .id = 0,
4172 .parent = &pll3_pfd_540M,
4173 .enable_reg = MXC_CCM_CCGR3,
4174 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
4175 .enable = _clk_enable,
4176 .disable = _clk_disable,
4179 static struct clk hdmi_clk[] = {
4181 __INIT_CLK_DEBUG(hdmi_isfr_clk)
4182 .id = 0,
4183 .parent = &pll3_pfd_540M,
4184 .enable_reg = MXC_CCM_CCGR2,
4185 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
4186 .enable = _clk_enable,
4187 .disable = _clk_disable,
4190 __INIT_CLK_DEBUG(hdmi_iahb_clk)
4191 .id = 1,
4192 .parent = &ahb_clk,
4193 .enable_reg = MXC_CCM_CCGR2,
4194 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4195 .enable = _clk_enable,
4196 .disable = _clk_disable,
4200 static struct clk caam_clk[] = {
4202 __INIT_CLK_DEBUG(caam_mem_clk)
4203 .id = 0,
4204 .enable_reg = MXC_CCM_CCGR0,
4205 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
4206 .enable = _clk_enable,
4207 .disable = _clk_disable,
4208 .secondary = &caam_clk[1],
4211 __INIT_CLK_DEBUG(caam_aclk_clk)
4212 .id = 1,
4213 .enable_reg = MXC_CCM_CCGR0,
4214 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
4215 .enable = _clk_enable,
4216 .disable = _clk_disable,
4217 .secondary = &caam_clk[2],
4220 __INIT_CLK_DEBUG(caam_ipg_clk)
4221 .id = 2,
4222 .enable_reg = MXC_CCM_CCGR0,
4223 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
4224 .enable = _clk_enable,
4225 .disable = _clk_disable,
4226 .parent = &mmdc_ch0_axi_clk[0],
4227 .secondary = &mx6per1_clk,
4231 static int _clk_asrc_serial_set_parent(struct clk *clk, struct clk *parent)
4233 u32 reg, mux;
4235 reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_SPDIF1_CLK_SEL_MASK;
4237 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
4238 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
4239 reg |= mux << MXC_CCM_CDCDR_SPDIF1_CLK_SEL_OFFSET;
4241 __raw_writel(reg, MXC_CCM_CDCDR);
4243 return 0;
4246 static unsigned long _clk_asrc_serial_get_rate(struct clk *clk)
4248 u32 reg, pred, podf;
4250 reg = __raw_readl(MXC_CCM_CDCDR);
4252 pred = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK)
4253 >> MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET) + 1;
4254 podf = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK)
4255 >> MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET) + 1;
4257 return clk_get_rate(clk->parent) / (pred * podf);
4260 static int _clk_asrc_serial_set_rate(struct clk *clk, unsigned long rate)
4262 u32 reg, div, pre, post;
4263 u32 parent_rate = clk_get_rate(clk->parent);
4265 div = parent_rate / rate;
4266 if (div == 0)
4267 div++;
4268 if (((parent_rate / div) != rate) || div > 64)
4269 return -EINVAL;
4271 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
4273 reg = __raw_readl(MXC_CCM_CDCDR);
4274 reg &= ~(MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK|
4275 MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK);
4276 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET;
4277 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET;
4279 __raw_writel(reg, MXC_CCM_CDCDR);
4281 return 0;
4284 static unsigned long _clk_asrc_serial_round_rate(struct clk *clk,
4285 unsigned long rate)
4287 u32 pre, post;
4288 u32 parent_rate = clk_get_rate(clk->parent);
4289 u32 div = parent_rate / rate;
4291 if (parent_rate % rate)
4292 div++;
4294 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
4296 return parent_rate / (pre * post);
4299 static struct clk asrc_clk[] = {
4301 __INIT_CLK_DEBUG(asrc_clk)
4302 .id = 0,
4303 .parent = &pll4_audio_main_clk,
4304 .enable_reg = MXC_CCM_CCGR0,
4305 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
4306 .enable = _clk_enable,
4307 .disable = _clk_disable,
4308 .secondary = &spba_clk,
4311 /* In the MX6 spec, asrc_serial_clk is listed as SPDIF1 clk
4312 * This clock can never be gated and does not have any
4313 * CCGR bits associated with it.
4314 */
4315 __INIT_CLK_DEBUG(asrc_serial_clk)
4316 .id = 1,
4317 .parent = &pll3_sw_clk,
4318 .set_rate = _clk_asrc_serial_set_rate,
4319 .get_rate = _clk_asrc_serial_get_rate,
4320 .set_parent = _clk_asrc_serial_set_parent,
4321 .round_rate = _clk_asrc_serial_round_rate,
4325 static struct clk apbh_dma_clk = {
4326 __INIT_CLK_DEBUG(apbh_dma_clk)
4327 .parent = &usdhc3_clk,
4328 .secondary = &mx6per1_clk,
4329 .enable = _clk_enable,
4330 .disable = _clk_disable_inwait,
4331 .enable_reg = MXC_CCM_CCGR0,
4332 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
4335 static struct clk aips_tz2_clk = {
4336 __INIT_CLK_DEBUG(aips_tz2_clk)
4337 .parent = &ahb_clk,
4338 .enable_reg = MXC_CCM_CCGR0,
4339 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
4340 .enable = _clk_enable,
4341 .disable = _clk_disable_inwait,
4344 static struct clk aips_tz1_clk = {
4345 __INIT_CLK_DEBUG(aips_tz1_clk)
4346 .parent = &ahb_clk,
4347 .enable_reg = MXC_CCM_CCGR0,
4348 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4349 .enable = _clk_enable,
4350 .disable = _clk_disable_inwait,
4354 static struct clk openvg_axi_clk = {
4355 __INIT_CLK_DEBUG(openvg_axi_clk)
4356 .parent = &gpu2d_axi_clk,
4357 .enable = _clk_enable,
4358 .enable_reg = MXC_CCM_CCGR3,
4359 .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
4360 .disable = _clk_disable,
4361 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4364 static unsigned long _clk_gpu3d_core_round_rate(struct clk *clk,
4365 unsigned long rate)
4367 u32 div;
4368 u32 parent_rate = clk_get_rate(clk->parent);
4370 div = parent_rate / rate;
4372 /* Make sure rate is not greater than the maximum value for the clock.
4373 * Also prevent a div of 0.
4374 */
4375 if (div == 0)
4376 div++;
4378 if (div > 8)
4379 div = 8;
4381 return parent_rate / div;
4384 static int _clk_gpu3d_core_set_parent(struct clk *clk, struct clk *parent)
4386 int mux;
4387 u32 reg = __raw_readl(MXC_CCM_CBCMR)
4388 & ~MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_MASK;
4390 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
4391 &pll3_usb_otg_main_clk,
4392 &pll2_pfd_594M, &pll2_pfd_400M, NULL, NULL);
4393 reg |= (mux << MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_OFFSET);
4394 __raw_writel(reg, MXC_CCM_CBCMR);
4396 return 0;
4399 static unsigned long _clk_gpu3d_core_get_rate(struct clk *clk)
4401 u32 reg, div;
4403 reg = __raw_readl(MXC_CCM_CBCMR);
4404 div = ((reg & MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK) >>
4405 MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET) + 1;
4407 return clk_get_rate(clk->parent) / div;
4410 static int _clk_gpu3d_core_set_rate(struct clk *clk, unsigned long rate)
4412 u32 reg, div;
4413 u32 parent_rate = clk_get_rate(clk->parent);
4415 div = parent_rate / rate;
4416 if (div == 0)
4417 div++;
4418 if (div > 8)
4419 div = 8;
4421 reg = __raw_readl(MXC_CCM_CBCMR);
4422 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
4423 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
4424 __raw_writel(reg, MXC_CCM_CBCMR);
4426 return 0;
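/*
 * Unlike most set_rate callbacks in this file, the GPU3D core (and shader)
 * set_rate clamps the divider into the 1..8 range and programs the nearest
 * achievable rate instead of returning -EINVAL for an inexact request.
 */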
4429 static struct clk gpu3d_core_clk[] = {
4431 __INIT_CLK_DEBUG(gpu3d_core_clk)
4432 .parent = &pll2_pfd_594M,
4433 .enable = _clk_enable,
4434 .disable = _clk_disable,
4435 .enable_reg = MXC_CCM_CCGR1,
4436 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
4437 .set_parent = _clk_gpu3d_core_set_parent,
4438 .set_rate = _clk_gpu3d_core_set_rate,
4439 .get_rate = _clk_gpu3d_core_get_rate,
4440 .round_rate = _clk_gpu3d_core_round_rate,
4441 .secondary = &gpu3d_core_clk[1],
4442 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4445 .parent = &gpu3d_axi_clk,
4446 .secondary = &mx6fast1_clk,
4450 static unsigned long _clk_gpu2d_core_round_rate(struct clk *clk,
4451 unsigned long rate)
4453 u32 div;
4454 u32 parent_rate = clk_get_rate(clk->parent);
4456 div = parent_rate / rate;
4458 /* Make sure rate is not greater than the maximum value for the clock.
4459 * Also prevent a div of 0.
4460 */
4461 if (div == 0)
4462 div++;
4464 if (div > 8)
4465 div = 8;
4467 return parent_rate / div;
4470 static int _clk_gpu2d_core_set_parent(struct clk *clk, struct clk *parent)
4472 int mux;
4473 u32 reg = __raw_readl(MXC_CCM_CBCMR) &
4474 ~MXC_CCM_CBCMR_GPU2D_CLK_SEL_MASK;
4476 /* On mx6dl, the 2D core clock is sourced from the 3D shader core clock. */
4477 if (!cpu_is_mx6dl()) {
4478 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
4479 &pll2_pfd_352M, &pll2_pfd_400M, NULL, NULL);
4480 reg |= (mux << MXC_CCM_CBCMR_GPU2D_CLK_SEL_OFFSET);
4481 __raw_writel(reg, MXC_CCM_CBCMR);
4484 return 0;
4487 static unsigned long _clk_gpu2d_core_get_rate(struct clk *clk)
4489 u32 reg, div = 1;
4491 reg = __raw_readl(MXC_CCM_CBCMR);
4492 if (cpu_is_mx6q())
4493 div = ((reg & MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK) >>
4494 MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET) + 1;
4495 else if (cpu_is_mx6dl())
4496 /* On i.mx6dl, gpu2d_core_clk is sourced from gpu3d_shader_clk. */
4497 return clk_get_rate(clk->parent);
4499 return clk_get_rate(clk->parent) / div;
4502 static int _clk_gpu2d_core_set_rate(struct clk *clk, unsigned long rate)
4504 u32 reg, div;
4505 u32 parent_rate = clk_get_rate(clk->parent);
4507 div = parent_rate / rate;
4508 if (div == 0)
4509 div++;
4510 if (((parent_rate / div) != rate) || (div > 8))
4511 return -EINVAL;
4513 reg = __raw_readl(MXC_CCM_CBCMR);
4514 reg &= ~MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK;
4515 reg |= (div - 1) << MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET;
4516 __raw_writel(reg, MXC_CCM_CBCMR);
4518 return 0;
4520 static struct clk gpu2d_core_clk[] = {
4522 __INIT_CLK_DEBUG(gpu2d_core_clk)
4523 .parent = &pll2_pfd_352M,
4524 .enable = _clk_enable,
4525 .enable_reg = MXC_CCM_CCGR1,
4526 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
4527 .disable = _clk_disable,
4528 .set_parent = _clk_gpu2d_core_set_parent,
4529 .set_rate = _clk_gpu2d_core_set_rate,
4530 .get_rate = _clk_gpu2d_core_get_rate,
4531 .round_rate = _clk_gpu2d_core_round_rate,
4532 .secondary = &mx6fast1_clk,
4533 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
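/*
 * GPU3D shader clock helpers: same divider/mux pattern as the GPU3D core
 * clock above, but using the GPU3D_SHADER_PODF divider and the
 * GPU3D_SHADER_CLK_SEL mux (mmdc_ch0_axi, pll3_usb_otg, pll2_pfd_594M or
 * pll3_pfd_720M).
 */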
4537 static unsigned long _clk_gpu3d_shader_round_rate(struct clk *clk,
4538 unsigned long rate)
4540 u32 div;
4541 u32 parent_rate = clk_get_rate(clk->parent);
4543 div = parent_rate / rate;
4545 /* Make sure rate is not greater than the maximum value for the clock.
4546 * Also prevent a div of 0. */
4548 if (div == 0)
4549 div++;
4551 if (div > 8)
4552 div = 8;
4554 return parent_rate / div;
4557 static int _clk_gpu3d_shader_set_parent(struct clk *clk, struct clk *parent)
4559 int mux;
4560 u32 reg = __raw_readl(MXC_CCM_CBCMR)
4561 & ~MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_MASK;
4563 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
4564 &pll3_usb_otg_main_clk,
4565 &pll2_pfd_594M, &pll3_pfd_720M, NULL, NULL);
4566 reg |= (mux << MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_OFFSET);
4567 __raw_writel(reg, MXC_CCM_CBCMR);
4569 return 0;
4572 static unsigned long _clk_gpu3d_shader_get_rate(struct clk *clk)
4574 u32 reg, div;
4576 reg = __raw_readl(MXC_CCM_CBCMR);
4577 div = ((reg & MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK) >>
4578 MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET) + 1;
4580 return clk_get_rate(clk->parent) / div;
4583 static int _clk_gpu3d_shader_set_rate(struct clk *clk, unsigned long rate)
4585 u32 reg, div;
4586 u32 parent_rate = clk_get_rate(clk->parent);
4588 div = parent_rate / rate;
4589 if (div == 0)
4590 div++;
4591 if (div > 8)
4592 div = 8;
4594 reg = __raw_readl(MXC_CCM_CBCMR);
4595 reg &= ~MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK;
4596 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET;
4597 __raw_writel(reg, MXC_CCM_CBCMR);
4599 return 0;
4603 static struct clk gpu3d_shader_clk = {
4604 __INIT_CLK_DEBUG(gpu3d_shader_clk)
4605 .parent = &pll3_pfd_720M,
4606 .secondary = &mmdc_ch0_axi_clk[0],
4607 .set_parent = _clk_gpu3d_shader_set_parent,
4608 .set_rate = _clk_gpu3d_shader_set_rate,
4609 .get_rate = _clk_gpu3d_shader_get_rate,
4610 .round_rate = _clk_gpu3d_shader_round_rate,
4611 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4614 /* set the parent by the ipcg table */
4615 static struct clk gpmi_nand_clk[] = {
4616 { /* gpmi_io_clk */
4617 __INIT_CLK_DEBUG(gpmi_io_clk)
4618 .parent = &enfc_clk,
4619 .secondary = &gpmi_nand_clk[1],
4620 .enable = _clk_enable,
4621 .enable_reg = MXC_CCM_CCGR4,
4622 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
4623 .disable = _clk_disable,
4625 { /* gpmi_apb_clk */
4626 __INIT_CLK_DEBUG(gpmi_apb_clk)
4627 .parent = &usdhc3_clk,
4628 .secondary = &gpmi_nand_clk[2],
4629 .enable = _clk_enable,
4630 .enable_reg = MXC_CCM_CCGR4,
4631 .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
4632 .disable = _clk_disable,
4634 { /* bch_clk */
4635 __INIT_CLK_DEBUG(gpmi_bch_clk)
4636 .parent = &usdhc4_clk,
4637 .secondary = &gpmi_nand_clk[3],
4638 .enable = _clk_enable,
4639 .enable_reg = MXC_CCM_CCGR4,
4640 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
4641 .disable = _clk_disable,
4643 { /* bch_apb_clk */
4644 __INIT_CLK_DEBUG(gpmi_bch_apb_clk)
4645 .parent = &usdhc3_clk,
4646 .secondary = &gpmi_nand_clk[4],
4647 .enable = _clk_enable,
4648 .enable_reg = MXC_CCM_CCGR4,
4649 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
4650 .disable = _clk_disable,
4652 { /* bch relative clk */
4653 __INIT_CLK_DEBUG(pl301_mx6qperl_bch)
4654 .parent = &mx6per1_clk,
4655 .secondary = &mmdc_ch0_axi_clk[0],
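/*
 * PWM1-PWM4 clock gates: all four PWM blocks run from ipg_perclk and are
 * gated by CCGR4 CG8..CG11.
 */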
4659 static struct clk pwm_clk[] = {
4661 __INIT_CLK_DEBUG(pwm_clk_0)
4662 .parent = &ipg_perclk,
4663 .id = 0,
4664 .enable_reg = MXC_CCM_CCGR4,
4665 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
4666 .enable = _clk_enable,
4667 .disable = _clk_disable,
4670 __INIT_CLK_DEBUG(pwm_clk_1)
4671 .parent = &ipg_perclk,
4672 .id = 1,
4673 .enable_reg = MXC_CCM_CCGR4,
4674 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
4675 .enable = _clk_enable,
4676 .disable = _clk_disable,
4679 __INIT_CLK_DEBUG(pwm_clk_2)
4680 .parent = &ipg_perclk,
4681 .id = 2,
4682 .enable_reg = MXC_CCM_CCGR4,
4683 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
4684 .enable = _clk_enable,
4685 .disable = _clk_disable,
4688 __INIT_CLK_DEBUG(pwm_clk_3)
4689 .parent = &ipg_perclk,
4690 .id = 3,
4691 .enable_reg = MXC_CCM_CCGR4,
4692 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
4693 .enable = _clk_enable,
4694 .disable = _clk_disable,
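/*
 * SATA clock gating: enable first turns on the SATA reference clock output
 * of the ENET PLL (ANADIG_PLL_ENET_EN_SATA) and then ungates CCGR5 CG2;
 * disable reverses the sequence.
 */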
4698 static int _clk_sata_enable(struct clk *clk)
4700 unsigned int reg;
4702 /* Enable SATA ref clock */
4703 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4704 reg |= ANADIG_PLL_ENET_EN_SATA;
4705 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4707 _clk_enable(clk);
4709 return 0;
4712 static void _clk_sata_disable(struct clk *clk)
4714 unsigned int reg;
4716 _clk_disable(clk);
4718 /* Disable SATA ref clock */
4719 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4720 reg &= ~ANADIG_PLL_ENET_EN_SATA;
4721 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4724 static struct clk sata_clk[] = {
4726 __INIT_CLK_DEBUG(sata_clk)
4727 .parent = &pll8_enet_main_clk,
4728 .enable = _clk_sata_enable,
4729 .enable_reg = MXC_CCM_CCGR5,
4730 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
4731 .disable = _clk_sata_disable,
4732 .secondary = &sata_clk[1],
4733 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4736 .parent = &ipg_clk,
4737 .secondary = &sata_clk[2],
4740 .parent = &mmdc_ch0_axi_clk[0],
4741 .secondary = &mx6per1_clk,
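/*
 * PCIe clock enable: route the SATA reference clock out on the LVDS CLK1
 * pad (input buffer off, source = SATA ref, output buffer on) so the
 * MiniPCIe slot and the link partner share the same reference, then turn
 * on the PCIe output of the ENET PLL and ungate the bus clock.
 */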
4745 static int _clk_pcie_enable(struct clk *clk)
4747 unsigned int reg;
4749 /* Activate LVDS CLK1 (the MiniPCIe slot clock input) */
4750 reg = __raw_readl(ANADIG_MISC1_REG);
4751 reg &= ~ANATOP_LVDS_CLK1_IBEN_MASK;
4752 __raw_writel(reg, ANADIG_MISC1_REG);
4754 reg = __raw_readl(ANADIG_MISC1_REG);
4755 reg |= ANATOP_LVDS_CLK1_SRC_SATA;
4756 __raw_writel(reg, ANADIG_MISC1_REG);
4758 reg = __raw_readl(ANADIG_MISC1_REG);
4759 reg |= ANATOP_LVDS_CLK1_OBEN_MASK;
4760 __raw_writel(reg, ANADIG_MISC1_REG);
4762 /* Enable PCIE ref clock */
4763 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4764 reg |= ANADIG_PLL_ENET_EN_PCIE;
4765 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4767 _clk_enable(clk);
4769 return 0;
4772 static void _clk_pcie_disable(struct clk *clk)
4774 unsigned int reg;
4776 _clk_disable(clk);
4778 /* De-activate LVDS CLK1 (the MiniPCIe slot clock input) */
4779 reg = __raw_readl(ANADIG_MISC1_REG);
4780 reg &= ~ANATOP_LVDS_CLK1_IBEN_MASK;
4781 __raw_writel(reg, ANADIG_MISC1_REG);
4783 reg = __raw_readl(ANADIG_MISC1_REG);
4784 reg &= ~ANATOP_LVDS_CLK1_SRC_SATA;
4785 __raw_writel(reg, ANADIG_MISC1_REG);
4787 reg = __raw_readl(ANADIG_MISC1_REG);
4788 reg &= ~ANATOP_LVDS_CLK1_OBEN_MASK;
4789 __raw_writel(reg, ANADIG_MISC1_REG);
4791 /* Disable PCIE ref clock */
4792 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4793 reg &= ~ANADIG_PLL_ENET_EN_PCIE;
4794 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4797 static struct clk pcie_clk[] = {
4799 __INIT_CLK_DEBUG(pcie_clk)
4800 .parent = &pcie_axi_clk,
4801 .enable = _clk_pcie_enable,
4802 .disable = _clk_pcie_disable,
4803 .enable_reg = MXC_CCM_CCGR4,
4804 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4805 .secondary = &pcie_clk[1],
4806 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4810 /* Enable the SATA ref clock.
4811 * PCIe needs both sides to have the same reference clock source;
4812 * the SATA reference clock is routed out to the link partner. */
4814 .parent = &sata_clk[0],
4815 .secondary = &pcie_clk[2],
4818 .parent = &mmdc_ch0_axi_clk[0],
4819 .secondary = &mx6fast1_clk,
4823 static struct clk usboh3_clk[] = {
4825 __INIT_CLK_DEBUG(usboh3_clk)
4826 .parent = &ahb_clk,
4827 .enable = _clk_enable,
4828 .enable_reg = MXC_CCM_CCGR6,
4829 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4830 .disable = _clk_disable,
4831 .secondary = &usboh3_clk[1],
4832 .flags = AHB_MED_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4835 .parent = &mmdc_ch0_axi_clk[0],
4836 .secondary = &mx6per1_clk,
4840 static int _clk_mlb_set_parent(struct clk *clk, struct clk *parent)
4842 u32 sel, cbcmr = __raw_readl(MXC_CCM_CBCMR);
4845 /* In Rigel validation, the MLB sys_clock isn't running at the
4846 * right frequency after boot.
4847 * On Arik, the CBCMR register field controls the gpu2d clock, not the
4848 * mlb clock; mlb is sourced from the axi clock.
4849 * But on Rigel the axi clock is lower than on mx6q, so mlb needs to
4850 * find a new clock root.
4851 * The gpu2d clock is the root of the mlb clock on Rigel.
4852 * Thus we need the code below on mx6dl.
4853 */
4854 sel = _get_mux(parent, &axi_clk, &pll3_sw_clk,
4855 &pll2_pfd_352M, &pll2_pfd_400M);
4857 cbcmr &= ~MXC_CCM_CBCMR_MLB_CLK_SEL_MASK;
4858 cbcmr |= sel << MXC_CCM_CBCMR_MLB_CLK_SEL_OFFSET;
4859 __raw_writel(cbcmr, MXC_CCM_CBCMR);
4861 return 0;
4864 static struct clk mlb150_clk = {
4865 __INIT_CLK_DEBUG(mlb150_clk)
4866 .id = 0,
4867 .set_parent = _clk_mlb_set_parent,
4868 .enable_reg = MXC_CCM_CCGR3,
4869 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
4870 .enable = _clk_enable,
4871 .disable = _clk_disable,
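/*
 * _clk_enable1()/_clk_disable1() set or clear a single bit at enable_shift;
 * they are used for the one-bit-wide enables below (the CCOSR CLKO enables
 * and the perfmon gates).
 */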
4874 static int _clk_enable1(struct clk *clk)
4876 u32 reg;
4877 reg = __raw_readl(clk->enable_reg);
4878 reg |= 1 << clk->enable_shift;
4879 __raw_writel(reg, clk->enable_reg);
4881 return 0;
4884 static void _clk_disable1(struct clk *clk)
4886 u32 reg;
4887 reg = __raw_readl(clk->enable_reg);
4888 reg &= ~(1 << clk->enable_shift);
4889 __raw_writel(reg, clk->enable_reg);
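/*
 * CCM_CLKO1 (clko_clk) output: set_parent maps the requested parent clock to
 * the CKOL_SEL value in CCOSR. Selecting clko2_clk instead sets the mirror
 * bit, so CLKO1 simply outputs whatever CLKO2 carries, bypassing the CKO1
 * divider.
 */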
4892 static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
4894 u32 sel, reg;
4896 if (parent == &pll3_usb_otg_main_clk)
4897 sel = 0;
4898 else if (parent == &pll2_528_bus_main_clk)
4899 sel = 1;
4900 else if (parent == &pll1_sys_main_clk)
4901 sel = 2;
4902 else if (parent == &pll5_video_main_clk)
4903 sel = 3;
4904 else if (parent == &axi_clk)
4905 sel = 5;
4906 else if (parent == &enfc_clk)
4907 sel = 6;
4908 else if (parent == &ipu1_di_clk[0])
4909 sel = 7;
4910 else if (parent == &ipu1_di_clk[1])
4911 sel = 8;
4912 else if (parent == &ipu2_di_clk[0])
4913 sel = 9;
4914 else if (parent == &ipu2_di_clk[1])
4915 sel = 10;
4916 else if (parent == &ahb_clk)
4917 sel = 11;
4918 else if (parent == &ipg_clk)
4919 sel = 12;
4920 else if (parent == &ipg_perclk)
4921 sel = 13;
4922 else if (parent == &ckil_clk)
4923 sel = 14;
4924 else if (parent == &pll4_audio_main_clk)
4925 sel = 15;
4926 else if (parent == &clko2_clk) {
4927 reg = __raw_readl(MXC_CCM_CCOSR);
4928 reg |= MXC_CCM_CCOSR_CKOL_MIRROR_CKO2_MASK;
4929 __raw_writel(reg, MXC_CCM_CCOSR);
4930 return 0;
4931 } else
4932 return -EINVAL;
4934 reg = __raw_readl(MXC_CCM_CCOSR);
4935 reg &= ~(MXC_CCM_CCOSR_CKOL_MIRROR_CKO2_MASK |
4936 MXC_CCM_CCOSR_CKOL_SEL_MASK);
4937 reg |= sel << MXC_CCM_CCOSR_CKOL_SEL_OFFSET;
4938 __raw_writel(reg, MXC_CCM_CCOSR);
4939 return 0;
4942 static unsigned long _clk_clko_get_rate(struct clk *clk)
4944 u32 reg = __raw_readl(MXC_CCM_CCOSR);
4945 u32 div = ((reg & MXC_CCM_CCOSR_CKOL_DIV_MASK) >>
4946 MXC_CCM_CCOSR_CKOL_DIV_OFFSET) + 1;
4948 if (clk->parent == &clko2_clk)
4949 /* clko may output clko2 without divider */
4950 return clk_get_rate(clk->parent);
4951 else
4952 return clk_get_rate(clk->parent) / div;
4955 static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
4957 u32 reg;
4958 u32 parent_rate = clk_get_rate(clk->parent);
4959 u32 div = parent_rate / rate;
4961 /* clko may output clko2 without divider */
4962 if (clk->parent == &clko2_clk)
4963 return 0;
4965 if (div == 0)
4966 div++;
4967 if (((parent_rate / div) != rate) || (div > 8))
4968 return -EINVAL;
4970 reg = __raw_readl(MXC_CCM_CCOSR);
4971 reg &= ~MXC_CCM_CCOSR_CKOL_DIV_MASK;
4972 reg |= (div - 1) << MXC_CCM_CCOSR_CKOL_DIV_OFFSET;
4973 __raw_writel(reg, MXC_CCM_CCOSR);
4974 return 0;
4977 static unsigned long _clk_clko_round_rate(struct clk *clk,
4978 unsigned long rate)
4980 u32 parent_rate = clk_get_rate(clk->parent);
4981 u32 div = parent_rate / rate;
4983 /* clko may output clko2 without divider */
4984 if (clk->parent == &clko2_clk)
4985 return parent_rate;
4987 /* Make sure rate is not greater than the maximum value for the clock.
4988 * Also prevent a div of 0. */
4990 if (div == 0)
4991 div++;
4992 else if (parent_rate % rate)
4993 div++;
4995 if (div > 8)
4996 div = 8;
4997 return parent_rate / div;
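/*
 * CCM_CLKO2 (clko2_clk) output: same scheme as CLKO1, using the CKO2_SEL
 * and CKO2_DIV fields in CCOSR; selector values with no usable source are
 * simply not mapped here.
 */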
5000 static int _clk_clko2_set_parent(struct clk *clk, struct clk *parent)
5002 u32 sel, reg;
5004 if (parent == &mmdc_ch0_axi_clk[0])
5005 sel = 0;
5006 else if (parent == &mmdc_ch1_axi_clk[0])
5007 sel = 1;
5008 else if (parent == &usdhc4_clk)
5009 sel = 2;
5010 else if (parent == &usdhc1_clk)
5011 sel = 3;
5012 else if (parent == &gpu2d_axi_clk)
5013 sel = 4;
5014 else if (parent == &ecspi_clk[0])
5015 sel = 6;
5016 else if (parent == &gpu3d_axi_clk)
5017 sel = 7;
5018 else if (parent == &usdhc3_clk)
5019 sel = 8;
5020 else if (parent == &pcie_clk[0])
5021 sel = 9;
5022 else if (parent == &ipu1_clk)
5023 sel = 11;
5024 else if (parent == &ipu2_clk)
5025 sel = 12;
5026 else if (parent == &vdo_axi_clk)
5027 sel = 13;
5028 else if (parent == &osc_clk)
5029 sel = 14;
5030 else if (parent == &gpu2d_core_clk[0])
5031 sel = 15;
5032 else if (parent == &gpu3d_core_clk[0])
5033 sel = 16;
5034 else if (parent == &usdhc2_clk)
5035 sel = 17;
5036 else if (parent == &ssi1_clk)
5037 sel = 18;
5038 else if (parent == &ssi2_clk)
5039 sel = 19;
5040 else if (parent == &ssi3_clk)
5041 sel = 20;
5042 else if (parent == &gpu3d_shader_clk)
5043 sel = 21;
5044 else if (parent == &can_clk_root)
5045 sel = 23;
5046 else if (parent == &ldb_di0_clk)
5047 sel = 24;
5048 else if (parent == &ldb_di1_clk)
5049 sel = 25;
5050 else if (parent == &esai_clk)
5051 sel = 26;
5052 else if (parent == &uart_clk[0])
5053 sel = 28;
5054 else if (parent == &spdif0_clk[0])
5055 sel = 29;
5056 else if (parent == &hsi_tx_clk[0])
5057 sel = 31;
5058 else
5059 return -EINVAL;
5061 reg = __raw_readl(MXC_CCM_CCOSR);
5062 reg &= ~MXC_CCM_CCOSR_CKO2_SEL_MASK;
5063 reg |= sel << MXC_CCM_CCOSR_CKO2_SEL_OFFSET;
5064 __raw_writel(reg, MXC_CCM_CCOSR);
5065 return 0;
5068 static unsigned long _clk_clko2_get_rate(struct clk *clk)
5070 u32 reg = __raw_readl(MXC_CCM_CCOSR);
5071 u32 div = ((reg & MXC_CCM_CCOSR_CKO2_DIV_MASK) >>
5072 MXC_CCM_CCOSR_CKO2_DIV_OFFSET) + 1;
5073 return clk_get_rate(clk->parent) / div;
5076 static int _clk_clko2_set_rate(struct clk *clk, unsigned long rate)
5078 u32 reg;
5079 u32 parent_rate = clk_get_rate(clk->parent);
5080 u32 div = parent_rate / rate;
5082 if (div == 0)
5083 div++;
5084 if (((parent_rate / div) != rate) || (div > 8))
5085 return -EINVAL;
5087 reg = __raw_readl(MXC_CCM_CCOSR);
5088 reg &= ~MXC_CCM_CCOSR_CKO2_DIV_MASK;
5089 reg |= (div - 1) << MXC_CCM_CCOSR_CKO2_DIV_OFFSET;
5090 __raw_writel(reg, MXC_CCM_CCOSR);
5091 return 0;
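/*
 * The two clock output signals: clko_clk drives CCM_CLKO1 (and can mirror
 * CLKO2), clko2_clk drives CCM_CLKO2. Both use the single-bit CCOSR enables
 * via _clk_enable1()/_clk_disable1().
 */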
5094 static struct clk clko_clk = {
5095 __INIT_CLK_DEBUG(clko_clk)
5096 .parent = &pll2_528_bus_main_clk,
5097 .enable = _clk_enable1,
5098 .enable_reg = MXC_CCM_CCOSR,
5099 .enable_shift = MXC_CCM_CCOSR_CKOL_EN_OFFSET,
5100 .disable = _clk_disable1,
5101 .set_parent = _clk_clko_set_parent,
5102 .set_rate = _clk_clko_set_rate,
5103 .get_rate = _clk_clko_get_rate,
5104 .round_rate = _clk_clko_round_rate,
5107 static struct clk clko2_clk = {
5108 __INIT_CLK_DEBUG(clko2_clk)
5109 .parent = &usdhc4_clk,
5110 .enable = _clk_enable1,
5111 .enable_reg = MXC_CCM_CCOSR,
5112 .enable_shift = MXC_CCM_CCOSR_CKO2_EN_OFFSET,
5113 .disable = _clk_disable1,
5114 .set_parent = _clk_clko2_set_parent,
5115 .set_rate = _clk_clko2_set_rate,
5116 .get_rate = _clk_clko2_get_rate,
5117 .round_rate = _clk_clko_round_rate,
5120 static struct clk perfmon0_clk = {
5121 __INIT_CLK_DEBUG(perfmon0_clk)
5122 .parent = &mmdc_ch0_axi_clk[0],
5123 .enable = _clk_enable1,
5124 .enable_reg = MXC_CCM_CCGR4,
5125 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
5126 .disable = _clk_disable1,
5129 static struct clk perfmon1_clk = {
5130 __INIT_CLK_DEBUG(perfmon1_clk)
5131 .parent = &ipu1_clk,
5132 .enable = _clk_enable1,
5133 .enable_reg = MXC_CCM_CCGR4,
5134 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
5135 .disable = _clk_disable1,
5138 static struct clk perfmon2_clk = {
5139 __INIT_CLK_DEBUG(perfmon2_clk)
5140 .parent = &mmdc_ch0_axi_clk[0],
5141 .enable = _clk_enable1,
5142 .enable_reg = MXC_CCM_CCGR4,
5143 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
5144 .disable = _clk_disable1,
5147 static struct clk dummy_clk = {
5148 .id = 0,
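/*
 * clkdev lookup table. Each _REGISTER_CLOCK() entry binds a struct clk to a
 * device name and/or connection id so drivers can resolve it with clk_get().
 * A minimal usage sketch (illustrative only, not part of this file):
 *
 *	struct clk *gpu3d = clk_get(NULL, "gpu3d_clk");
 *	if (!IS_ERR(gpu3d)) {
 *		clk_enable(gpu3d);
 *		pr_info("gpu3d core running at %lu Hz\n", clk_get_rate(gpu3d));
 *	}
 */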
5151 #define _REGISTER_CLOCK(d, n, c) \
5153 .dev_id = d, \
5154 .con_id = n, \
5155 .clk = &c, \
5159 static struct clk_lookup lookups[] = {
5160 _REGISTER_CLOCK(NULL, "osc", osc_clk),
5161 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
5162 _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk),
5163 _REGISTER_CLOCK(NULL, "ckil", ckil_clk),
5164 _REGISTER_CLOCK(NULL, "pll1_main_clk", pll1_sys_main_clk),
5165 _REGISTER_CLOCK(NULL, "pll1_sw_clk", pll1_sw_clk),
5166 _REGISTER_CLOCK(NULL, "pll2", pll2_528_bus_main_clk),
5167 _REGISTER_CLOCK(NULL, "pll2_pfd_400M", pll2_pfd_400M),
5168 _REGISTER_CLOCK(NULL, "pll2_pfd_352M", pll2_pfd_352M),
5169 _REGISTER_CLOCK(NULL, "pll2_pfd_594M", pll2_pfd_594M),
5170 _REGISTER_CLOCK(NULL, "pll2_200M", pll2_200M),
5171 _REGISTER_CLOCK(NULL, "pll3_main_clk", pll3_usb_otg_main_clk),
5172 _REGISTER_CLOCK(NULL, "pll3_pfd_508M", pll3_pfd_508M),
5173 _REGISTER_CLOCK(NULL, "pll3_pfd_454M", pll3_pfd_454M),
5174 _REGISTER_CLOCK(NULL, "pll3_pfd_720M", pll3_pfd_720M),
5175 _REGISTER_CLOCK(NULL, "pll3_pfd_540M", pll3_pfd_540M),
5176 _REGISTER_CLOCK(NULL, "pll3_sw_clk", pll3_sw_clk),
5177 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_120M),
5178 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_80M),
5179 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_60M),
5180 _REGISTER_CLOCK(NULL, "pll4", pll4_audio_main_clk),
5181 _REGISTER_CLOCK(NULL, "pll5", pll5_video_main_clk),
5182 _REGISTER_CLOCK(NULL, "pll6", pll6_mlb150_main_clk),
5183 _REGISTER_CLOCK(NULL, "pll3", pll7_usb_host_main_clk),
5184 _REGISTER_CLOCK(NULL, "pll4", pll8_enet_main_clk),
5185 _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk),
5186 _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
5187 _REGISTER_CLOCK(NULL, "periph_clk", periph_clk),
5188 _REGISTER_CLOCK(NULL, "axi_clk", axi_clk),
5189 _REGISTER_CLOCK(NULL, "mmdc_ch0_axi", mmdc_ch0_axi_clk[0]),
5190 _REGISTER_CLOCK(NULL, "mmdc_ch1_axi", mmdc_ch1_axi_clk[0]),
5191 _REGISTER_CLOCK(NULL, "ahb", ahb_clk),
5192 _REGISTER_CLOCK(NULL, "ipg_clk", ipg_clk),
5193 _REGISTER_CLOCK(NULL, "ipg_perclk", ipg_perclk),
5194 _REGISTER_CLOCK(NULL, "spba", spba_clk),
5195 _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk[0]),
5196 _REGISTER_CLOCK(NULL, "gpu2d_axi_clk", gpu2d_axi_clk),
5197 _REGISTER_CLOCK(NULL, "gpu3d_axi_clk", gpu3d_axi_clk),
5198 _REGISTER_CLOCK(NULL, "pcie_axi_clk", pcie_axi_clk),
5199 _REGISTER_CLOCK(NULL, "vdo_axi_clk", vdo_axi_clk),
5200 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
5201 _REGISTER_CLOCK(NULL, "i2c_clk", i2c_clk[0]),
5202 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk[1]),
5203 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk[2]),
5204 _REGISTER_CLOCK(NULL, "vpu_clk", vpu_clk[0]),
5205 _REGISTER_CLOCK(NULL, "ipu1_clk", ipu1_clk),
5206 _REGISTER_CLOCK(NULL, "ipu2_clk", ipu2_clk),
5207 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, usdhc1_clk),
5208 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, usdhc2_clk),
5209 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, usdhc3_clk),
5210 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, usdhc4_clk),
5211 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk),
5212 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk),
5213 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk),
5214 _REGISTER_CLOCK(NULL, "ipu1_di0_clk", ipu1_di_clk[0]),
5215 _REGISTER_CLOCK(NULL, "ipu1_di1_clk", ipu1_di_clk[1]),
5216 _REGISTER_CLOCK(NULL, "ipu2_di0_clk", ipu2_di_clk[0]),
5217 _REGISTER_CLOCK(NULL, "ipu2_di1_clk", ipu2_di_clk[1]),
5218 _REGISTER_CLOCK(NULL, "can_root_clk", can_clk_root),
5219 _REGISTER_CLOCK("imx6q-flexcan.0", NULL, can1_clk[0]),
5220 _REGISTER_CLOCK("imx6q-flexcan.1", NULL, can2_clk[0]),
5221 _REGISTER_CLOCK(NULL, "ldb_di0_clk", ldb_di0_clk),
5222 _REGISTER_CLOCK(NULL, "ldb_di1_clk", ldb_di1_clk),
5223 _REGISTER_CLOCK("mxc_spdif.0", NULL, spdif0_clk[0]),
5224 _REGISTER_CLOCK(NULL, "esai_clk", esai_clk),
5225 _REGISTER_CLOCK("imx6q-ecspi.0", NULL, ecspi_clk[0]),
5226 _REGISTER_CLOCK("imx6q-ecspi.1", NULL, ecspi_clk[1]),
5227 _REGISTER_CLOCK("imx6q-ecspi.2", NULL, ecspi_clk[2]),
5228 _REGISTER_CLOCK("imx6q-ecspi.3", NULL, ecspi_clk[3]),
5229 _REGISTER_CLOCK("imx6q-ecspi.4", NULL, ecspi_clk[4]),
5230 _REGISTER_CLOCK(NULL, "emi_slow_clk", emi_slow_clk),
5231 _REGISTER_CLOCK(NULL, "emi_clk", emi_clk),
5232 _REGISTER_CLOCK(NULL, "enfc_clk", enfc_clk),
5233 _REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0]),
5234 _REGISTER_CLOCK("imx-uart.1", NULL, uart_clk[0]),
5235 _REGISTER_CLOCK("imx-uart.2", NULL, uart_clk[0]),
5236 _REGISTER_CLOCK("imx-uart.3", NULL, uart_clk[0]),
5237 _REGISTER_CLOCK(NULL, "hsi_tx", hsi_tx_clk[0]),
5238 _REGISTER_CLOCK(NULL, "caam_clk", caam_clk[0]),
5239 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk[0]),
5240 _REGISTER_CLOCK(NULL, "asrc_serial_clk", asrc_clk[1]),
5241 _REGISTER_CLOCK(NULL, "mxs-dma-apbh", apbh_dma_clk),
5242 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
5243 _REGISTER_CLOCK(NULL, "gpu3d_clk", gpu3d_core_clk[0]),
5244 _REGISTER_CLOCK(NULL, "gpu2d_clk", gpu2d_core_clk[0]),
5245 _REGISTER_CLOCK(NULL, "gpu3d_shader_clk", gpu3d_shader_clk),
5246 _REGISTER_CLOCK(NULL, "gpt", gpt_clk[0]),
5247 _REGISTER_CLOCK("imx6q-gpmi-nand.0", NULL, gpmi_nand_clk[0]),
5248 _REGISTER_CLOCK(NULL, "gpmi-apb", gpmi_nand_clk[1]),
5249 _REGISTER_CLOCK(NULL, "bch", gpmi_nand_clk[2]),
5250 _REGISTER_CLOCK(NULL, "bch-apb", gpmi_nand_clk[3]),
5251 _REGISTER_CLOCK(NULL, "pl301_mx6qperl-bch", gpmi_nand_clk[4]),
5252 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk[0]),
5253 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm_clk[1]),
5254 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm_clk[2]),
5255 _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm_clk[3]),
5256 _REGISTER_CLOCK(NULL, "pcie_clk", pcie_clk[0]),
5257 _REGISTER_CLOCK("enet.0", NULL, enet_clk[0]),
5258 _REGISTER_CLOCK(NULL, "imx_sata_clk", sata_clk[0]),
5259 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk[0]),
5260 _REGISTER_CLOCK(NULL, "usb_phy1_clk", usb_phy1_clk),
5261 _REGISTER_CLOCK(NULL, "usb_phy3_clk", usb_phy3_clk),
5262 _REGISTER_CLOCK(NULL, "usb_phy4_clk", usb_phy4_clk),
5263 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk),
5264 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk),
5265 _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_clk[0]),
5266 _REGISTER_CLOCK(NULL, "hdmi_iahb_clk", hdmi_clk[1]),
5267 _REGISTER_CLOCK(NULL, "mipi_pllref_clk", mipi_pllref_clk),
5268 _REGISTER_CLOCK(NULL, "vdoa", vdoa_clk[0]),
5269 _REGISTER_CLOCK(NULL, NULL, aips_tz2_clk),
5270 _REGISTER_CLOCK(NULL, NULL, aips_tz1_clk),
5271 _REGISTER_CLOCK(NULL, "clko_clk", clko_clk),
5272 _REGISTER_CLOCK(NULL, "clko2_clk", clko2_clk),
5273 _REGISTER_CLOCK(NULL, "pxp_axi", ipu2_clk),
5274 _REGISTER_CLOCK(NULL, "epdc_axi", ipu2_clk),
5275 _REGISTER_CLOCK(NULL, "epdc_pix", ipu2_di_clk[1]),
5276 _REGISTER_CLOCK("mxs-perfmon.0", "perfmon", perfmon0_clk),
5277 _REGISTER_CLOCK("mxs-perfmon.1", "perfmon", perfmon1_clk),
5278 _REGISTER_CLOCK("mxs-perfmon.2", "perfmon", perfmon2_clk),
5279 _REGISTER_CLOCK(NULL, "mlb150_clk", mlb150_clk),
5280 _REGISTER_CLOCK(NULL, "anaclk_1", anaclk_1),
5281 _REGISTER_CLOCK(NULL, "anaclk_2", anaclk_2),
5284 static void clk_tree_init(void)
5287 unsigned int reg;
5289 reg = __raw_readl(MMDC_MDMISC_OFFSET);
5290 if ((reg & MMDC_MDMISC_DDR_TYPE_MASK) ==
5291 (0x1 << MMDC_MDMISC_DDR_TYPE_OFFSET) ||
5292 cpu_is_mx6dl()) {
5293 clk_set_parent(&periph_clk, &pll2_pfd_400M);
5294 printk(KERN_INFO "Set periph_clk's parent to pll2_pfd_400M!\n");
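/*
 * mx6_clocks_init() registers the clkdev lookups, sets up the GPT timer,
 * re-parents the major consumers, disables unused PLLs/PFDs and gates off
 * everything not needed at boot. It is normally called from the board
 * file's timer init with the board crystal rates, e.g. (typical values,
 * board dependent):
 *
 *	mx6_clocks_init(32768, 24000000, 0, 0);
 */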
5299 int __init mx6_clocks_init(unsigned long ckil, unsigned long osc,
5300 unsigned long ckih1, unsigned long ckih2)
5302 __iomem void *base;
5303 int i, reg;
5305 external_low_reference = ckil;
5306 external_high_reference = ckih1;
5307 ckih2_reference = ckih2;
5308 oscillator_reference = osc;
5310 timer_base = ioremap(GPT_BASE_ADDR, SZ_4K);
5312 apll_base = ioremap(ANATOP_BASE_ADDR, SZ_4K);
5314 for (i = 0; i < ARRAY_SIZE(lookups); i++) {
5315 clkdev_add(&lookups[i]);
5316 clk_debug_register(lookups[i].clk);
5319 /* Lower the ipg_perclk frequency to 6MHz. */
5320 clk_set_rate(&ipg_perclk, 6000000);
5322 /* The timer needs to be initialized first, as the
5323 * WAIT routines use the GPT counter as
5324 * a delay. */
5326 if (mx6q_revision() == IMX_CHIP_REVISION_1_0) {
5327 gpt_clk[0].parent = &ipg_perclk;
5328 gpt_clk[0].get_rate = NULL;
5329 } else {
5330 /* Here we use the 24M OSC as the GPT clock source, so there is
5331 * no need to enable the GPT serial clock. */
5332 gpt_clk[0].secondary = NULL;
5335 mxc_timer_init(&gpt_clk[0], timer_base, MXC_INT_GPT);
5337 clk_tree_init();
5339 /* keep correct count. */
5340 clk_enable(&cpu_clk);
5341 clk_enable(&periph_clk);
5342 /* Disable unnecessary PFDs & PLLs */
5343 if (pll2_pfd_400M.usecount == 0 && cpu_is_mx6q())
5344 pll2_pfd_400M.disable(&pll2_pfd_400M);
5345 pll2_pfd_352M.disable(&pll2_pfd_352M);
5346 pll2_pfd_594M.disable(&pll2_pfd_594M);
5348 #if !defined(CONFIG_FEC_1588)
5349 pll3_pfd_454M.disable(&pll3_pfd_454M);
5350 pll3_pfd_508M.disable(&pll3_pfd_508M);
5351 pll3_pfd_540M.disable(&pll3_pfd_540M);
5352 pll3_pfd_720M.disable(&pll3_pfd_720M);
5354 pll3_usb_otg_main_clk.disable(&pll3_usb_otg_main_clk);
5355 #endif
5356 pll4_audio_main_clk.disable(&pll4_audio_main_clk);
5357 pll5_video_main_clk.disable(&pll5_video_main_clk);
5358 pll6_mlb150_main_clk.disable(&pll6_mlb150_main_clk);
5359 pll7_usb_host_main_clk.disable(&pll7_usb_host_main_clk);
5360 pll8_enet_main_clk.disable(&pll8_enet_main_clk);
5362 sata_clk[0].disable(&sata_clk[0]);
5363 pcie_clk[0].disable(&pcie_clk[0]);
5365 /* Initialize the audio and video PLLs to valid frequencies. */
5366 clk_set_rate(&pll4_audio_main_clk, 176000000);
5367 clk_set_rate(&pll5_video_main_clk, 650000000);
5369 clk_set_parent(&ipu1_di_clk[0], &pll5_video_main_clk);
5370 clk_set_parent(&ipu1_di_clk[1], &pll5_video_main_clk);
5371 clk_set_parent(&ipu2_di_clk[0], &pll5_video_main_clk);
5372 clk_set_parent(&ipu2_di_clk[1], &pll5_video_main_clk);
5374 clk_set_parent(&emi_clk, &pll2_pfd_400M);
5375 clk_set_rate(&emi_clk, 200000000);
5378 /* On mx6dl, the 2D core clock is sourced from the 3D shader core
5379 * clock, but the 3D shader clock multiplexer on mx6dl differs from
5380 * the one on mx6q. For instance, the equivalent of pll2_pfd_594M on
5381 * mx6q is pll2_pfd_528M on mx6dl. Make a note here. */
5383 clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594M);
5384 clk_set_rate(&gpu3d_shader_clk, 594000000);
5385 if (cpu_is_mx6dl()) {
5387 /* For mx6dl, change the gpu3d core clk parent to pll2_pfd_594M. */
5387 clk_set_parent(&gpu3d_core_clk[0], &pll2_pfd_594M);
5388 clk_set_rate(&gpu3d_core_clk[0], 594000000);
5390 /* On mx6dl, the 2D core clock is sourced from the 3D shader core clock. */
5391 clk_set_parent(&gpu2d_core_clk[0], &gpu3d_shader_clk);
5392 /* On mx6dl, gpu3d_axi_clk is sourced from mmdc0 directly. */
5393 clk_set_parent(&gpu3d_axi_clk, &mmdc_ch0_axi_clk[0]);
5394 /* On mx6dl, gpu2d_axi_clk is sourced from mmdc0 directly. */
5395 clk_set_parent(&gpu2d_axi_clk, &mmdc_ch0_axi_clk[0]);
5397 clk_set_parent(&ipu1_clk, &pll3_pfd_540M);
5398 /* pxp & epdc */
5399 clk_set_parent(&ipu2_clk, &pll2_pfd_400M);
5400 clk_set_rate(&ipu2_clk, 200000000);
5401 clk_set_parent(&axi_clk, &pll3_pfd_540M);
5402 } else if (cpu_is_mx6q()) {
5403 clk_set_parent(&gpu3d_core_clk[0], &mmdc_ch0_axi_clk[0]);
5404 clk_set_rate(&gpu3d_core_clk[0], 528000000);
5405 clk_set_parent(&ipu2_clk, &mmdc_ch0_axi_clk[0]);
5406 clk_set_parent(&ipu1_clk, &mmdc_ch0_axi_clk[0]);
5407 clk_set_parent(&axi_clk, &periph_clk);
5410 /* Need to keep PLL3_PFD_540M enabled until AXI is sourced from it. */
5411 clk_enable(&axi_clk);
5413 if (cpu_is_mx6q())
5414 clk_set_parent(&gpu2d_core_clk[0], &pll3_usb_otg_main_clk);
5416 clk_set_parent(&ldb_di0_clk, &pll2_pfd_352M);
5417 clk_set_parent(&ldb_di1_clk, &pll2_pfd_352M);
5419 /* PCLK camera - J5 */
5420 clk_set_parent(&clko2_clk, &osc_clk);
5421 clk_set_rate(&clko2_clk, 2400000);
5423 clk_set_parent(&clko_clk, &pll4_audio_main_clk);
5425 /* FIXME: asrc needs to use the asrc_serial (spdif1) clock for sample
5426 * rate conversion, and this clock frequency cannot be too high; set
5427 * it to the minimum value of 7.5 MHz so that asrc works properly. */
5429 clk_set_parent(&asrc_clk[1], &pll3_sw_clk);
5430 clk_set_rate(&asrc_clk[1], 7500000);
5432 /* Set the GPMI clock to the default frequency: 20 MHz. */
5433 clk_set_parent(&enfc_clk, &pll2_pfd_400M);
5434 clk_set_rate(&enfc_clk, enfc_clk.round_rate(&enfc_clk, 20000000));
5436 mx6_cpu_op_init();
5437 cpu_op_tbl = get_cpu_op(&cpu_op_nr);
5439 /* Gate off all possible clocks */
5440 if (mxc_jtag_enabled) {
5441 __raw_writel(3 << MXC_CCM_CCGRx_CG11_OFFSET |
5442 3 << MXC_CCM_CCGRx_CG2_OFFSET |
5443 3 << MXC_CCM_CCGRx_CG1_OFFSET |
5444 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
5445 } else {
5446 __raw_writel(1 << MXC_CCM_CCGRx_CG11_OFFSET |
5447 3 << MXC_CCM_CCGRx_CG2_OFFSET |
5448 3 << MXC_CCM_CCGRx_CG1_OFFSET |
5449 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
5451 if (mx6q_revision() == IMX_CHIP_REVISION_1_0)
5452 /* If GPT uses ipg_perclk, we need to enable the GPT serial clock. */
5453 __raw_writel(3 << MXC_CCM_CCGRx_CG10_OFFSET | 3 << MXC_CCM_CCGRx_CG11_OFFSET, MXC_CCM_CCGR1);
5454 else
5455 __raw_writel(3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR1);
5456 __raw_writel(1 << MXC_CCM_CCGRx_CG12_OFFSET |
5457 1 << MXC_CCM_CCGRx_CG11_OFFSET |
5458 3 << MXC_CCM_CCGRx_CG10_OFFSET |
5459 3 << MXC_CCM_CCGRx_CG9_OFFSET |
5460 3 << MXC_CCM_CCGRx_CG8_OFFSET, MXC_CCM_CCGR2);
5461 __raw_writel(1 << MXC_CCM_CCGRx_CG14_OFFSET |
5462 1 << MXC_CCM_CCGRx_CG13_OFFSET |
5463 3 << MXC_CCM_CCGRx_CG12_OFFSET |
5464 1 << MXC_CCM_CCGRx_CG11_OFFSET |
5465 3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR3);
5466 __raw_writel(3 << MXC_CCM_CCGRx_CG7_OFFSET |
5467 1 << MXC_CCM_CCGRx_CG6_OFFSET |
5468 1 << MXC_CCM_CCGRx_CG4_OFFSET, MXC_CCM_CCGR4);
5469 __raw_writel(1 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR5);
5471 __raw_writel(0, MXC_CCM_CCGR6);
5473 /* S/PDIF */
5474 clk_set_parent(&spdif0_clk[0], &pll3_pfd_454M);
5476 /* MLB150 SYS Clock */
5478 /* In Rigel validation, the MLB sys_clock isn't running at the
5479 * right frequency after boot.
5480 * On Arik, the CBCMR register field controls the gpu2d clock, not the
5481 * mlb clock; mlb is sourced from the axi clock.
5482 * But on Rigel the axi clock is lower than on mx6q, so mlb needs to
5483 * find a new clock root.
5484 * The gpu2d clock is the root of the mlb clock on Rigel.
5485 * Thus we need the code below on mx6dl.
5486 */
5487 if (cpu_is_mx6dl())
5488 clk_set_parent(&mlb150_clk, &pll3_sw_clk);
5491 if (cpu_is_mx6dl()) {
5492 /* pxp & epdc */
5493 clk_set_parent(&ipu2_clk, &pll2_pfd_400M);
5494 clk_set_rate(&ipu2_clk, 200000000);
5495 if (epdc_enabled)
5496 clk_set_parent(&ipu2_di_clk[1], &pll5_video_main_clk);
5497 else
5498 clk_set_parent(&ipu2_di_clk[1], &pll3_pfd_540M);
5501 lp_high_freq = 0;
5502 lp_med_freq = 0;
5503 lp_audio_freq = 0;
5505 /* Turn OFF all unnecessary PHYs. */
5506 if (cpu_is_mx6q()) {
5507 /* Turn off SATA PHY. */
5508 base = ioremap(MX6Q_SATA_BASE_ADDR, SZ_8K);
5509 reg = __raw_readl(base + PORT_PHY_CTL);
5510 __raw_writel(reg | PORT_PHY_CTL_PDDQ_LOC, base + PORT_PHY_CTL);
5513 /* Turn off HDMI PHY. */
5514 base = ioremap(MX6Q_HDMI_ARB_BASE_ADDR, SZ_128K);
5515 reg = __raw_readb(base + HDMI_PHY_CONF0);
5516 __raw_writeb(reg | HDMI_PHY_CONF0_GEN2_PDDQ_MASK, base + HDMI_PHY_CONF0);
5518 reg = __raw_readb(base + HDMI_MC_PHYRSTZ);
5519 __raw_writeb(reg | HDMI_MC_PHYRSTZ_DEASSERT, base + HDMI_MC_PHYRSTZ);
5521 iounmap(base);
5523 base = ioremap(MX6Q_IOMUXC_BASE_ADDR, SZ_4K);
5524 /* Close PLL inside SATA PHY. */
5525 reg = __raw_readl(base + 0x34);
5526 __raw_writel(reg | (1 << 1), base + 0x34);
5528 /* Close PCIE PHY. */
5529 reg = __raw_readl(base + 0x04);
5530 reg |= (1 << 18);
5531 __raw_writel(reg, base + 0x04);
5532 iounmap(base);
5534 return 0;