2 * GPMC support functions
4 * Copyright (C) 2005-2006 Nokia Corporation
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/ioport.h>
22 #include <linux/spinlock.h>
24 #include <linux/module.h>
26 #include <asm/mach-types.h>
27 #include <plat/gpmc.h>
29 #include <plat/sdrc.h>
/* GPMC register offsets (from the module base) */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc

/* Size of one chip-select register bank */
#define GPMC_CS_SIZE		0x30

/* GPMC-addressable external memory window */
#define GPMC_MEM_START		0x00000000
#define GPMC_MEM_END		0x3FFFFFFF
#define BOOT_ROM_SPACE		0x100000	/* 1MB */

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 128 MB */

/* PREFETCH_CONFIG1 bit fields */
#define PREFETCH_FIFOTHRESHOLD	(0x40 << 8)
#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2
65 /* Structure to save gpmc cs context */
66 struct gpmc_cs_config
{
78 * Structure to save/restore gpmc context
79 * to support core off on OMAP3
81 struct omap3_gpmc_regs
{
89 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
92 static struct resource gpmc_mem_root
;
93 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
94 static DEFINE_SPINLOCK(gpmc_mem_lock
);
95 static unsigned gpmc_cs_map
;
97 static void __iomem
*gpmc_base
;
99 static struct clk
*gpmc_l3_clk
;
101 static void gpmc_write_reg(int idx
, u32 val
)
103 __raw_writel(val
, gpmc_base
+ idx
);
106 static u32
gpmc_read_reg(int idx
)
108 return __raw_readl(gpmc_base
+ idx
);
111 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
113 void __iomem
*reg_addr
;
115 reg_addr
= gpmc_base
+ GPMC_CS0
+ (cs
* GPMC_CS_SIZE
) + idx
;
116 __raw_writel(val
, reg_addr
);
119 u32
gpmc_cs_read_reg(int cs
, int idx
)
121 void __iomem
*reg_addr
;
123 reg_addr
= gpmc_base
+ GPMC_CS0
+ (cs
* GPMC_CS_SIZE
) + idx
;
124 return __raw_readl(reg_addr
);
127 /* TODO: Add support for gpmc_fck to clock framework and use it */
128 unsigned long gpmc_get_fclk_period(void)
130 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
133 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
138 rate
= 1000000000 / rate
; /* In picoseconds */
/*
 * Convert @time_ns nanoseconds to GPMC fclk ticks, rounding up so the
 * programmed timing is never shorter than requested.
 */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}
/* Convert @ticks GPMC fclk ticks back to nanoseconds. */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}
/*
 * Round @time_ns up to a whole number of GPMC fclk ticks and return the
 * resulting duration in nanoseconds.
 */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long ticks = gpmc_ns_to_ticks(time_ns);

	return ticks * gpmc_get_fclk_period() / 1000;
}
166 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
167 int time
, const char *name
)
169 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
174 int ticks
, mask
, nr_bits
;
179 ticks
= gpmc_ns_to_ticks(time
);
180 nr_bits
= end_bit
- st_bit
+ 1;
181 if (ticks
>= 1 << nr_bits
) {
183 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
184 cs
, name
, time
, ticks
, 1 << nr_bits
);
189 mask
= (1 << nr_bits
) - 1;
190 l
= gpmc_cs_read_reg(cs
, reg
);
193 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
194 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
195 (l
>> st_bit
) & mask
, time
);
197 l
&= ~(mask
<< st_bit
);
198 l
|= ticks
<< st_bit
;
199 gpmc_cs_write_reg(cs
, reg
, l
);
/*
 * GPMC_SET_ONE - program one gpmc_timings field, returning -1 from the
 * enclosing function when the value does not fit in the register field.
 * Relies on locals 'cs' and 't' in the caller (gpmc_cs_set_timings).
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
215 int gpmc_cs_calc_divider(int cs
, unsigned int sync_clk
)
220 l
= sync_clk
* 1000 + (gpmc_get_fclk_period() - 1);
221 div
= l
/ gpmc_get_fclk_period();
230 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
235 div
= gpmc_cs_calc_divider(cs
, t
->sync_clk
);
239 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
240 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
241 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
243 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
244 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
245 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
247 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
248 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
249 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
250 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
252 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
253 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
254 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
256 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
258 if (cpu_is_omap34xx()) {
259 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
260 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
263 /* caller is expected to have initialized CONFIG1 to cover
264 * at least sync vs async
266 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
267 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
269 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
270 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
274 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
280 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
285 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
286 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
288 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
290 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
291 l
|= GPMC_CONFIG7_CSVALID
;
292 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
295 static void gpmc_cs_disable_mem(int cs
)
299 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
300 l
&= ~GPMC_CONFIG7_CSVALID
;
301 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
304 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
309 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
310 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
311 mask
= (l
>> 8) & 0x0f;
312 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
315 static int gpmc_cs_mem_enabled(int cs
)
319 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
320 return l
& GPMC_CONFIG7_CSVALID
;
323 int gpmc_cs_set_reserved(int cs
, int reserved
)
325 if (cs
> GPMC_CS_NUM
)
328 gpmc_cs_map
&= ~(1 << cs
);
329 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
334 int gpmc_cs_reserved(int cs
)
336 if (cs
> GPMC_CS_NUM
)
339 return gpmc_cs_map
& (1 << cs
);
342 static unsigned long gpmc_mem_align(unsigned long size
)
346 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
347 order
= GPMC_CHUNK_SHIFT
- 1;
356 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
358 struct resource
*res
= &gpmc_cs_mem
[cs
];
361 size
= gpmc_mem_align(size
);
362 spin_lock(&gpmc_mem_lock
);
364 res
->end
= base
+ size
- 1;
365 r
= request_resource(&gpmc_mem_root
, res
);
366 spin_unlock(&gpmc_mem_lock
);
371 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
373 struct resource
*res
= &gpmc_cs_mem
[cs
];
376 if (cs
> GPMC_CS_NUM
)
379 size
= gpmc_mem_align(size
);
380 if (size
> (1 << GPMC_SECTION_SHIFT
))
383 spin_lock(&gpmc_mem_lock
);
384 if (gpmc_cs_reserved(cs
)) {
388 if (gpmc_cs_mem_enabled(cs
))
389 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
391 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
396 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
398 gpmc_cs_set_reserved(cs
, 1);
400 spin_unlock(&gpmc_mem_lock
);
403 EXPORT_SYMBOL(gpmc_cs_request
);
405 void gpmc_cs_free(int cs
)
407 spin_lock(&gpmc_mem_lock
);
408 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
409 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
411 spin_unlock(&gpmc_mem_lock
);
414 gpmc_cs_disable_mem(cs
);
415 release_resource(&gpmc_cs_mem
[cs
]);
416 gpmc_cs_set_reserved(cs
, 0);
417 spin_unlock(&gpmc_mem_lock
);
419 EXPORT_SYMBOL(gpmc_cs_free
);
422 * gpmc_prefetch_enable - configures and starts prefetch transfer
423 * @cs: nand cs (chip select) number
424 * @dma_mode: dma mode enable (1) or disable (0)
425 * @u32_count: number of bytes to be transferred
426 * @is_write: prefetch read(0) or write post(1) mode
428 int gpmc_prefetch_enable(int cs
, int dma_mode
,
429 unsigned int u32_count
, int is_write
)
431 uint32_t prefetch_config1
;
433 if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL
))) {
434 /* Set the amount of bytes to be prefetched */
435 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, u32_count
);
437 /* Set dma/mpu mode, the prefetch read / post write and
438 * enable the engine. Set which cs is has requested for.
440 prefetch_config1
= ((cs
<< CS_NUM_SHIFT
) |
441 PREFETCH_FIFOTHRESHOLD
|
443 (dma_mode
<< DMA_MPU_MODE
) |
445 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, prefetch_config1
);
449 /* Start the prefetch engine */
450 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, 0x1);
454 EXPORT_SYMBOL(gpmc_prefetch_enable
);
457 * gpmc_prefetch_reset - disables and stops the prefetch engine
459 void gpmc_prefetch_reset(void)
461 /* Stop the PFPW engine */
462 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, 0x0);
464 /* Reset/disable the PFPW engine */
465 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, 0x0);
467 EXPORT_SYMBOL(gpmc_prefetch_reset
);
470 * gpmc_prefetch_status - reads prefetch status of engine
472 int gpmc_prefetch_status(void)
474 return gpmc_read_reg(GPMC_PREFETCH_STATUS
);
476 EXPORT_SYMBOL(gpmc_prefetch_status
);
478 static void __init
gpmc_mem_init(void)
481 unsigned long boot_rom_space
= 0;
483 /* never allocate the first page, to facilitate bug detection;
484 * even if we didn't boot from ROM.
486 boot_rom_space
= BOOT_ROM_SPACE
;
487 /* In apollon the CS0 is mapped as 0x0000 0000 */
488 if (machine_is_omap_apollon())
490 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
491 gpmc_mem_root
.end
= GPMC_MEM_END
;
493 /* Reserve all regions that has been set up by bootloader */
494 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
497 if (!gpmc_cs_mem_enabled(cs
))
499 gpmc_cs_get_memconf(cs
, &base
, &size
);
500 if (gpmc_cs_insert_mem(cs
, base
, size
) < 0)
505 void __init
gpmc_init(void)
510 if (cpu_is_omap24xx()) {
512 if (cpu_is_omap2420())
513 l
= OMAP2420_GPMC_BASE
;
515 l
= OMAP34XX_GPMC_BASE
;
516 } else if (cpu_is_omap34xx()) {
518 l
= OMAP34XX_GPMC_BASE
;
519 } else if (cpu_is_omap44xx()) {
521 l
= OMAP44XX_GPMC_BASE
;
524 gpmc_l3_clk
= clk_get(NULL
, ck
);
525 if (IS_ERR(gpmc_l3_clk
)) {
526 printk(KERN_ERR
"Could not get GPMC clock %s\n", ck
);
530 gpmc_base
= ioremap(l
, SZ_4K
);
532 clk_put(gpmc_l3_clk
);
533 printk(KERN_ERR
"Could not get GPMC register memory\n");
537 clk_enable(gpmc_l3_clk
);
539 l
= gpmc_read_reg(GPMC_REVISION
);
540 printk(KERN_INFO
"GPMC revision %d.%d\n", (l
>> 4) & 0x0f, l
& 0x0f);
541 /* Set smart idle mode and automatic L3 clock gating */
542 l
= gpmc_read_reg(GPMC_SYSCONFIG
);
544 l
|= (0x02 << 3) | (1 << 0);
545 gpmc_write_reg(GPMC_SYSCONFIG
, l
);
549 #ifdef CONFIG_ARCH_OMAP3
550 static struct omap3_gpmc_regs gpmc_context
;
552 void omap3_gpmc_save_context()
555 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
556 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
557 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
558 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
559 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
560 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
561 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
562 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
563 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
564 if (gpmc_context
.cs_context
[i
].is_valid
) {
565 gpmc_context
.cs_context
[i
].config1
=
566 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
567 gpmc_context
.cs_context
[i
].config2
=
568 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
569 gpmc_context
.cs_context
[i
].config3
=
570 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
571 gpmc_context
.cs_context
[i
].config4
=
572 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
573 gpmc_context
.cs_context
[i
].config5
=
574 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
575 gpmc_context
.cs_context
[i
].config6
=
576 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
577 gpmc_context
.cs_context
[i
].config7
=
578 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
583 void omap3_gpmc_restore_context()
586 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
587 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
588 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
589 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
590 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
591 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
592 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
593 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
594 if (gpmc_context
.cs_context
[i
].is_valid
) {
595 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
596 gpmc_context
.cs_context
[i
].config1
);
597 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
598 gpmc_context
.cs_context
[i
].config2
);
599 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
600 gpmc_context
.cs_context
[i
].config3
);
601 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
602 gpmc_context
.cs_context
[i
].config4
);
603 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
604 gpmc_context
.cs_context
[i
].config5
);
605 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
606 gpmc_context
.cs_context
[i
].config6
);
607 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
608 gpmc_context
.cs_context
[i
].config7
);
612 #endif /* CONFIG_ARCH_OMAP3 */