/*
 * drivers/mtd/nand/gpio.c
 *
 * Updated, and converted to generic GPIO based driver by Russell King.
 *
 * Written by Ben Dooks <ben@simtec.co.uk>
 *   Based on 2.4 version by Mark Whittaker
 *
 * © 2004 Simtec Electronics
 *
 * Device driver for NAND connected via GPIO
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
struct gpiomtd {
	void __iomem		*io_sync;
	struct mtd_info		mtd_info;
	struct nand_chip	nand_chip;
	struct gpio_nand_platdata plat;
};

#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
#ifdef CONFIG_ARM
/* gpio_nand_dosync()
 *
 * Make sure the GPIO state changes occur in-order with writes to NAND
 * memory region.
 * Needed on PXA due to bus-reordering within the SoC itself (see section on
 * I/O ordering in PXA manual (section 2.3, p35)
 */
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
	unsigned long tmp;

	if (gpiomtd->io_sync) {
		/*
		 * Linux memory barriers don't cater for what's required here.
		 * What's required is what's here - a read from a separate
		 * region with a dependency on that read.
		 */
		tmp = readl(gpiomtd->io_sync);
		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
	}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif
static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);

	gpio_nand_dosync(gpiomtd);

	if (ctrl & NAND_CTRL_CHANGE) {
		gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
		gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
		gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
		gpio_nand_dosync(gpiomtd);
	}
	if (cmd == NAND_CMD_NONE)
		return;

	writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
	gpio_nand_dosync(gpiomtd);
}
static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;

	iowrite8_rep(this->IO_ADDR_W, buf, len);
}
static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;

	ioread8_rep(this->IO_ADDR_R, buf, len);
}
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
				 int len)
{
	struct nand_chip *this = mtd->priv;

	if (IS_ALIGNED((unsigned long)buf, 2)) {
		iowrite16_rep(this->IO_ADDR_W, buf, len >> 1);
	} else {
		int i;
		unsigned short *ptr = (unsigned short *)buf;

		for (i = 0; i < len; i += 2, ptr++)
			writew(*ptr, this->IO_ADDR_W);
	}
}
static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;

	if (IS_ALIGNED((unsigned long)buf, 2)) {
		ioread16_rep(this->IO_ADDR_R, buf, len >> 1);
	} else {
		int i;
		unsigned short *ptr = (unsigned short *)buf;

		for (i = 0; i < len; i += 2, ptr++)
			*ptr = readw(this->IO_ADDR_R);
	}
}
static int gpio_nand_devready(struct mtd_info *mtd)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);

	if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
		return gpio_get_value(gpiomtd->plat.gpio_rdy);

	/* no RDY line wired up: assume the chip is always ready */
	return 1;
}
#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
	{ .compatible = "gpio-control-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
static int gpio_nand_get_config_of(const struct device *dev,
				   struct gpio_nand_platdata *plat)
{
	u32 val;

	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
		if (val == 2) {
			plat->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
	plat->gpio_nce = of_get_gpio(dev->of_node, 1);
	plat->gpio_ale = of_get_gpio(dev->of_node, 2);
	plat->gpio_cle = of_get_gpio(dev->of_node, 3);
	plat->gpio_nwp = of_get_gpio(dev->of_node, 4);

	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
		plat->chip_delay = val;

	return 0;
}
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
	u64 addr;

	if (!r || of_property_read_u64(pdev->dev.of_node,
				       "gpio-control-nand,io-sync-reg", &addr))
		return NULL;

	r->start = addr;
	r->end = r->start + 0x3;
	r->flags = IORESOURCE_MEM;

	return r;
}
#else /* CONFIG_OF */
static inline int gpio_nand_get_config_of(const struct device *dev,
					  struct gpio_nand_platdata *plat)
{
	return -ENOSYS;
}

static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	return NULL;
}
#endif /* CONFIG_OF */
static inline int gpio_nand_get_config(const struct device *dev,
				       struct gpio_nand_platdata *plat)
{
	int ret = gpio_nand_get_config_of(dev, plat);

	if (!ret)
		return ret;

	if (dev->platform_data) {
		memcpy(plat, dev->platform_data, sizeof(*plat));
		return 0;
	}

	return -EINVAL;
}
static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
	struct resource *r = gpio_nand_get_io_sync_of(pdev);

	if (r)
		return r;

	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
static int gpio_nand_remove(struct platform_device *dev)
{
	struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
	struct resource *res;

	nand_release(&gpiomtd->mtd_info);

	res = gpio_nand_get_io_sync(dev);
	iounmap(gpiomtd->io_sync);
	if (res)
		release_mem_region(res->start, resource_size(res));

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	iounmap(gpiomtd->nand_chip.IO_ADDR_R);
	release_mem_region(res->start, resource_size(res));

	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
	gpio_set_value(gpiomtd->plat.gpio_nce, 1);

	gpio_free(gpiomtd->plat.gpio_cle);
	gpio_free(gpiomtd->plat.gpio_ale);
	gpio_free(gpiomtd->plat.gpio_nce);
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_free(gpiomtd->plat.gpio_nwp);
	if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
		gpio_free(gpiomtd->plat.gpio_rdy);

	return 0;
}
static void __iomem *request_and_remap(struct resource *res, size_t size,
			const char *name, int *err)
{
	void __iomem *ptr;

	if (!request_mem_region(res->start, resource_size(res), name)) {
		*err = -EBUSY;
		return NULL;
	}

	ptr = ioremap(res->start, size);
	if (!ptr) {
		release_mem_region(res->start, resource_size(res));
		*err = -ENOMEM;
	}
	return ptr;
}
static int gpio_nand_probe(struct platform_device *dev)
{
	struct gpiomtd *gpiomtd;
	struct nand_chip *this;
	struct resource *res0, *res1;
	struct mtd_part_parser_data ppdata = {};
	int ret = 0;

	if (!dev->dev.of_node && !dev->dev.platform_data)
		return -EINVAL;

	res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res0)
		return -EINVAL;

	gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
	if (gpiomtd == NULL) {
		dev_err(&dev->dev, "failed to create NAND MTD\n");
		return -ENOMEM;
	}

	this = &gpiomtd->nand_chip;
	this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
	if (!this->IO_ADDR_R) {
		dev_err(&dev->dev, "unable to map NAND\n");
		goto err_map;
	}

	res1 = gpio_nand_get_io_sync(dev);
	if (res1) {
		gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
		if (!gpiomtd->io_sync) {
			dev_err(&dev->dev, "unable to map sync NAND\n");
			goto err_sync;
		}
	}

	ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
	if (ret)
		goto err_nce;

	ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
	if (ret)
		goto err_nce;
	gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
		ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
		if (ret)
			goto err_nwp;
		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
	}
	ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
	if (ret)
		goto err_ale;
	gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
	ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
	if (ret)
		goto err_cle;
	gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
	if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
		ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
		if (ret)
			goto err_rdy;
		gpio_direction_input(gpiomtd->plat.gpio_rdy);
	}

	this->IO_ADDR_W  = this->IO_ADDR_R;
	this->ecc.mode   = NAND_ECC_SOFT;
	this->options    = gpiomtd->plat.options;
	this->chip_delay = gpiomtd->plat.chip_delay;

	/* install our routines */
	this->cmd_ctrl   = gpio_nand_cmd_ctrl;
	this->dev_ready  = gpio_nand_devready;

	if (this->options & NAND_BUSWIDTH_16) {
		this->read_buf   = gpio_nand_readbuf16;
		this->write_buf  = gpio_nand_writebuf16;
	} else {
		this->read_buf   = gpio_nand_readbuf;
		this->write_buf  = gpio_nand_writebuf;
	}

	/* set the mtd private data for the nand driver */
	gpiomtd->mtd_info.priv = this;
	gpiomtd->mtd_info.owner = THIS_MODULE;

	if (nand_scan(&gpiomtd->mtd_info, 1)) {
		dev_err(&dev->dev, "no nand chips found?\n");
		ret = -ENXIO;
		goto err_wp;
	}

	if (gpiomtd->plat.adjust_parts)
		gpiomtd->plat.adjust_parts(&gpiomtd->plat,
					   gpiomtd->mtd_info.size);

	ppdata.of_node = dev->dev.of_node;
	ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
					gpiomtd->plat.parts,
					gpiomtd->plat.num_parts);
	if (ret)
		goto err_wp;
	platform_set_drvdata(dev, gpiomtd);

	return 0;

err_wp:
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
	if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
		gpio_free(gpiomtd->plat.gpio_rdy);
err_rdy:
	gpio_free(gpiomtd->plat.gpio_cle);
err_cle:
	gpio_free(gpiomtd->plat.gpio_ale);
err_ale:
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_free(gpiomtd->plat.gpio_nwp);
err_nwp:
	gpio_free(gpiomtd->plat.gpio_nce);
err_nce:
	iounmap(gpiomtd->io_sync);
	if (res1)
		release_mem_region(res1->start, resource_size(res1));
err_sync:
	iounmap(gpiomtd->nand_chip.IO_ADDR_R);
	release_mem_region(res0->start, resource_size(res0));
err_map:
	return ret;
}
static struct platform_driver gpio_nand_driver = {
	.probe		= gpio_nand_probe,
	.remove		= gpio_nand_remove,
	.driver		= {
		.name	= "gpio-nand",
		.of_match_table = of_match_ptr(gpio_nand_id_table),
	},
};

module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");