/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>
#include <mach/pm.h>

#include <mach/dma.h>

#include <mach/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return mtd->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}
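
/*
 * OMAP3 BufferRAM access: word-aligned transfers of at least 384 bytes
 * are done with a system DMA channel and polled with a 20 ms timeout;
 * anything smaller, misaligned, or spanning a vmalloc page boundary
 * falls back to a plain memcpy from/to the memory-mapped BufferRAM.
 */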
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	/*
	 * dma_src is the handle returned by dma_map_single(); that handle
	 * is what must be checked for mapping errors and later unmapped.
	 */
	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
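
/*
 * OMAP2 BufferRAM access: the DMA path below is kept for reference but
 * is currently disabled by the "if (1 || ...)" guard (see the in-line
 * comment about PM requirements), so all transfers go through memcpy.
 */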
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	/*
	 * dma_src is the handle returned by dma_map_single(); check and
	 * unmap that handle, not the device-side address.
	 */
	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}
#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}
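
/*
 * Re-apply the board-supplied GPMC timing setup on every OneNAND device
 * bound to this driver, presumably after the interface clock or the
 * desired sync frequency (c->freq) has changed.
 */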
int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;
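
	/*
	 * The sync-read frequency in MHz is derived from the OneNAND
	 * version ID reported by onenand_scan(); omap2_onenand_rephase()
	 * hands it back to the board setup hook when timings are
	 * re-applied.
	 */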
	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}
#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		omap_free_gpio(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		omap_free_gpio(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	kfree(c);

	return 0;
}
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);
MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");