/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define GPMC_IRQ_STATUS		0x18
#define GPMC_ECC_CONFIG		0x1F4
#define GPMC_ECC_CONTROL	0x1F8
#define GPMC_ECC_SIZE_CONFIG	0x1FC
#define GPMC_ECC1_RESULT	0x200

#define DRIVER_NAME	"omap2-nand"

#define NAND_WP_OFF	0
#define NAND_WP_BIT	0x00000010

#define GPMC_BUF_FULL	0x00000001
#define GPMC_BUF_EMPTY	0x00000000

#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)
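
/*
 * The NAND_Ecc_P*e/P*o flags above name the even (e) and odd (o) parity
 * terms of the hardware-generated Hamming ECC.  The P*() helper macros
 * below pick those bits out of a 32-bit working value and repack them
 * into the three ECC bytes built by gen_true_ecc() further down; the
 * P*_s() variants describe a different byte layout of the same bits.
 */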

#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
static int use_prefetch = 1;

/* "modprobe ... use_prefetch=0" etc */
module_param(use_prefetch, bool, 0);
MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
static int use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
const int use_dma;
#endif
#else
const int use_prefetch;
const int use_dma;
#endif
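
/*
 * When the corresponding Kconfig options are disabled, use_prefetch and
 * use_dma fall back to the const-zero definitions above, so the rest of
 * the driver can test them unconditionally and the compiler can discard
 * the prefetch/DMA-only code paths.
 */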

struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct mtd_partition		*parts;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	void __iomem			*gpmc_cs_baseaddr;
	void __iomem			*gpmc_baseaddr;
	void __iomem			*nand_pref_fifo_add;
	struct completion		comp;
	int				dma_ch;
};

/**
 * omap_nand_wp - This function enables or disables the Write Protect feature
 * @mtd: MTD device structure
 * @mode: WP ON/OFF
 */
static void omap_nand_wp(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);

	unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);

	if (mode)
		config &= ~(NAND_WP_BIT);	/* WP is ON */
	else
		config |= (NAND_WP_BIT);	/* WP is OFF */

	__raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
}

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	switch (ctrl) {
	case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_COMMAND;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;

	case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_ADDRESS;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;

	case NAND_CTRL_CHANGE | NAND_NCE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;
	}

	if (cmd != NAND_CMD_NONE)
		__raw_writeb(cmd, info->nand.IO_ADDR_W);
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
						GPMC_STATUS) & GPMC_BUF_FULL));
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;

	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);

		while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
						GPMC_STATUS) & GPMC_BUF_FULL))
			;
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pfpw_status = 0, r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		do {
			pfpw_status = gpmc_prefetch_status();
			/* FIFO byte count from the status, as 32-bit words */
			r_count = ((pfpw_status >> 24) & 0x7F) >> 2;
			ioread32_rep(info->nand_pref_fifo_add, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset();
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pfpw_status = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *) buf;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_R);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		pfpw_status = gpmc_prefetch_status();
		while (pfpw_status & 0x3FFF) {
			w_count = ((pfpw_status >> 24) & 0x7F) >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand_pref_fifo_add);
			pfpw_status = gpmc_prefetch_status();
		}

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset();
	}
}
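
/*
 * The two *_buf_pref() helpers above drive the GPMC prefetch/write-posting
 * (PFPW) engine with the CPU copying data to or from the FIFO.  When
 * CONFIG_MTD_NAND_OMAP_PREFETCH_DMA is enabled, the code below adds a
 * variant that lets a system DMA channel service the same engine instead.
 */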

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
						0x10, buf_len, OMAP_DMA_SYNC_FRAME,
						OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
						0x10, buf_len, OMAP_DMA_SYNC_FRAME,
						OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);

	while (0x3fff & (prefetch_status = gpmc_prefetch_status()))
		;
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset();

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
#else
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	return 0;
}
#endif

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
/**
 * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
 * @mtd: MTD device structure
 */
static void omap_hwecc_init(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned long val = 0x0;

	/* Read from ECC Control Register */
	val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
	/* Clear all ECC | Enable Reg1 */
	val = ((0x00000001 << 8) | 0x00000001);
	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);

	/* Read from ECC Size Config Register */
	val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
	/* ECCSIZE1=512 | Select eccResultsize[0-3] */
	val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
}

/*
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

/*
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECCs and indicates if there is an error.
 * If the error can be corrected it will be corrected in the buffer.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			u8 *ecc_data2,	/* read from register */
			u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			(ecc_bit[21] << 7) +
			(ecc_bit[19] << 6) +
			(ecc_bit[17] << 5) +
			(ecc_bit[15] << 4) +
			(ecc_bit[13] << 3) +
			(ecc_bit[11] << 2) +
			(ecc_bit[9] << 1) +
			ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from the nand spare area with the ECC register
 * values and, if they mismatch, calls 'omap_compare_ecc' for error
 * detection and correction.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return 0;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long val = 0x0;
	unsigned long reg;

	/* Start Reading from HW ECC1_Result = 0x200 */
	reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
	val = __raw_readl(reg);
	*ecc_code++ = val;          /* P128e, ..., P1e */
	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
	reg += 4;

	return 0;
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);

	switch (mode) {
	case NAND_ECC_READ:
		__raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	case NAND_ECC_READSYN:
		__raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	case NAND_ECC_WRITE:
		__raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	default:
		DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
			mode);
		break;
	}

	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
}
#endif

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations and,
 * given the way it is called from the MTD layer, we should wait until the
 * NAND chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_COMMAND;
	this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;

	__raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);

	while (time_before(jiffies, timeo)) {
		status = __raw_readb(this->IO_ADDR_R);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);

	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		__raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = __raw_readl(info->gpmc_baseaddr +
						GPMC_IRQ_STATUS);
		}
	}

	return 1;
}

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct omap_nand_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs = pdata->cs;
	info->gpmc_baseaddr = pdata->gpmc_baseaddr;
	info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
	info->phys_base = pdata->phys_base;

	info->mtd.priv = &info->nand;
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.owner = THIS_MODULE;

	info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
	info->nand.options |= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	omap_nand_wp(&info->mtd, NAND_WP_OFF);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to OMAP, use the omap ready
	 * function and the generic nand_wait function, which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read the status register until you get a failure or
	 * success.
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	if (use_prefetch) {
		/* copy the virtual address of nand base for fifo access */
		info->nand_pref_fifo_add = info->nand.IO_ADDR_R;

		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
		if (use_dma) {
			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
			if (err < 0) {
				info->dma_ch = -1;
				printk(KERN_WARNING "DMA request failed."
					" Non-dma data transfer mode\n");
			} else {
				omap_set_dma_dest_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);
				omap_set_dma_src_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);

				info->nand.read_buf = omap_read_buf_dma_pref;
				info->nand.write_buf = omap_write_buf_dma_pref;
			}
		}
	} else {
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
		} else {
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
		}
	}
	info->nand.verify_buf = omap_verify_buf;

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
	info->nand.ecc.bytes = 3;
	info->nand.ecc.size = 512;
	info->nand.ecc.calculate = omap_calculate_ecc;
	info->nand.ecc.hwctl = omap_enable_hwecc;
	info->nand.ecc.correct = omap_correct_data;
	info->nand.ecc.mode = NAND_ECC_HW;

	/* init HW ECC */
	omap_hwecc_init(&info->mtd);
#else
	info->nand.ecc.mode = NAND_ECC_SOFT;
#endif

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan(&info->mtd, 1)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan(&info->mtd, 1)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(&info->mtd, info->parts, err);
	else if (pdata->parts)
		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(&info->mtd);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	if (use_dma)
		omap_free_dma(info->dma_ch);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand_pref_fifo_add);
	kfree(&info->mtd);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);

	/* This check is required if the driver is being
	 * loaded at run time as a module
	 */
	if ((1 == use_dma) && (0 == use_prefetch)) {
		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
				"without 'use_prefetch'. Prefetch will not be"
				" used in either mode (mpu or dma)\n");
	}
	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");