/*
 * Broadcom SiliconBackplane chipcommon serial flash interface
 *
 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 #include <linux/version.h>
23 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
24 #include <linux/config.h>
27 #include <linux/reciprocal_div.h>
28 #include <linux/module.h>
29 #include <linux/slab.h>
30 #include <linux/ioport.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
36 #include <linux/mtd/compatmac.h>
38 /* #include <linux/mtd/nand.h> */
41 #include <linux/errno.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
/*
 * NOTE(review): this file is a garbled extraction -- the original source's
 * own line numbers are fused into the text and many lines (struct headers,
 * braces, statements) are missing from this view.  All code tokens below
 * are preserved byte-for-byte; only comment lines were added.
 */
/* Partition-table builder, implemented elsewhere in the hndnand layer. */
58 #ifdef CONFIG_MTD_PARTITIONS
59 extern struct mtd_partition
*
60 init_nflash_mtd_partitions(hndnand_t
*nfl
, struct mtd_info
*mtd
, size_t size
);
/* Cached partition table, filled in by nflash_mtd_init(). */
62 struct mtd_partition
*nflash_parts
;
65 /* Mutexing is version-dependent */
66 extern struct nand_hw_control
*nand_hwcontrol_lock_init(void);
/*
 * NOTE(review): the two fields below (controller, region) presumably belong
 * inside a `struct nflash_mtd { ... }` definition whose surrounding lines
 * are missing from this view -- TODO confirm against the original source.
 */
72 struct nand_hw_control
*controller
;
73 struct mtd_erase_region_info region
;
77 /* Private global state */
78 static struct nflash_mtd nflash
;
/* Controller acquire/release helpers; bodies appear further below. */
80 static int _nflash_get_device(struct nflash_mtd
*nflash
);
81 static void _nflash_release_device(struct nflash_mtd
*nflash
);
83 #define NFLASH_LOCK(nflash) _nflash_get_device(nflash)
84 #define NFLASH_UNLOCK(nflash) _nflash_release_device(nflash)
/*
 * Acquire the shared NAND controller for exclusive use.
 *
 * Visible logic: if controller->active is NULL or the active chip's state
 * is FL_READY, the (missing) fast path presumably keeps controller->lock
 * and returns; otherwise the caller is put on controller->wq in
 * TASK_UNINTERRUPTIBLE state and retries after being woken by
 * _nflash_release_device().
 *
 * NOTE(review): garbled extraction -- the function's return type, braces,
 * the spin_lock/spin_unlock calls, schedule() and return statements are not
 * visible here; code tokens are preserved byte-for-byte.  Confirm against
 * the original Broadcom nflash driver before relying on this description.
 */
87 _nflash_get_device(struct nflash_mtd
*nflash
)
89 spinlock_t
*lock
= &nflash
->controller
->lock
;
90 wait_queue_head_t
*wq
= &nflash
->controller
->wq
;
91 struct nand_chip
*chip
;
92 DECLARE_WAITQUEUE(wait
, current
);
/* Read the currently active chip (presumably under *lock -- not visible). */
97 chip
= nflash
->controller
->active
;
98 if (!chip
|| chip
->state
== FL_READY
)
/* Controller busy: queue ourselves on the controller wait queue and retry. */
101 set_current_state(TASK_UNINTERRUPTIBLE
);
102 add_wait_queue(wq
, &wait
);
105 remove_wait_queue(wq
, &wait
);
/*
 * Release the NAND controller: wake any task sleeping on controller->wq,
 * then drop controller->lock.
 *
 * NOTE(review): the matching spin_lock is not visible in this garbled
 * view -- presumably taken in _nflash_get_device(); the function's return
 * type and braces are also missing.  Code tokens preserved byte-for-byte.
 */
110 _nflash_release_device(struct nflash_mtd
*nflash
)
112 wake_up(&nflash
->controller
->wq
);
113 spin_unlock(&nflash
->controller
->lock
);
/*
 * Core read helper shared by nflash_mtd_read() (caller holds the
 * controller via NFLASH_LOCK).
 *
 * Visible logic: resolves which partition `from` lands in (when part is
 * NULL), computes the page-aligned span to read, allocates a bounce buffer
 * (tmpbuf) for unaligned starts, then walks the flash page by page with
 * hndnand_read(), skipping blocks marked bad in nflash->map by adding
 * `skip_bytes` to the physical offset.  The final memcpy copies the
 * caller's bytes out of the bounce buffer past the `extra` head alignment.
 *
 * NOTE(review): garbled extraction -- braces, the main read loop framing,
 * error paths, kfree(tmpbuf) and return statements are missing from this
 * view; code tokens are preserved byte-for-byte.  Do not trust the control
 * flow as shown -- confirm against the original source.
 */
117 _nflash_mtd_read(struct mtd_info
*mtd
, struct mtd_partition
*part
,
118 loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
120 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
123 uchar
*tmpbuf
= NULL
;
125 uint offset
, blocksize
, mask
, blk_offset
, off
;
126 uint skip_bytes
= 0, good_bytes
= 0, page_size
;
131 /* Locate the part */
133 for (i
= 0; nflash_parts
[i
].name
; i
++) {
134 if (from
>= nflash_parts
[i
].offset
&&
135 ((nflash_parts
[i
+1].name
== NULL
) || (from
< nflash_parts
[i
+1].offset
))) {
136 part
= &nflash_parts
[i
];
143 /* Check address range */
146 if ((from
+ len
) > mtd
->size
)
/* Round the read up to whole flash pages; unaligned head goes via tmpbuf. */
149 page_size
= nflash
->nfl
->pagesize
;
150 if ((offset
& (page_size
- 1)) != 0) {
151 extra
= offset
& (page_size
- 1);
156 size
= (len
+ (page_size
- 1)) & ~(page_size
- 1);
162 tmpbuf
= (uchar
*)kmalloc(size
, GFP_KERNEL
);
/* Bad-block bookkeeping: count skipped blocks before the read offset. */
166 blocksize
= mtd
->erasesize
;
167 mask
= blocksize
- 1;
168 blk_offset
= offset
& ~mask
;
169 good_bytes
= part
->offset
& ~mask
;
170 /* Check and skip bad blocks */
171 for (blk_idx
= good_bytes
/blocksize
; blk_idx
< mtd
->eraseregions
->numblocks
; blk_idx
++) {
172 if (nflash
->map
[blk_idx
] != 0) {
173 skip_bytes
+= blocksize
;
175 if (good_bytes
== blk_offset
)
177 good_bytes
+= blocksize
;
/* No good block found for the requested offset -- error path (not visible). */
180 if (blk_idx
== mtd
->eraseregions
->numblocks
) {
184 blk_offset
= blocksize
* blk_idx
;
187 off
= offset
+ skip_bytes
;
189 /* Check and skip bad blocks */
190 if (off
>= (blk_offset
+ blocksize
)) {
191 blk_offset
+= blocksize
;
193 while ((nflash
->map
[blk_idx
] != 0) &&
194 (blk_offset
< mtd
->size
)) {
195 skip_bytes
+= blocksize
;
196 blk_offset
+= blocksize
;
199 if (blk_offset
>= mtd
->size
) {
/* Physical offset for this page read = logical offset + skipped bytes. */
203 off
= offset
+ skip_bytes
;
206 if ((bytes
= hndnand_read(nflash
->nfl
,
207 off
, page_size
, ptr
)) < 0) {
/* Copy the requested bytes out of the bounce buffer, past `extra`. */
222 memcpy(buf
, tmpbuf
+extra
, *retlen
);
/*
 * MTD .read entry point: delegates to _nflash_mtd_read() with part == NULL
 * (the helper resolves the partition itself), then releases the controller
 * with NFLASH_UNLOCK.
 *
 * NOTE(review): garbled extraction -- the matching NFLASH_LOCK call, the
 * function's return type, braces and return statement are missing from
 * this view.  Code tokens preserved byte-for-byte.
 */
229 nflash_mtd_read(struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
232 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
235 ret
= _nflash_mtd_read(mtd
, NULL
, from
, len
, retlen
, buf
);
236 NFLASH_UNLOCK(nflash
);
/*
 * MTD .write entry point.
 *
 * Visible logic: resolves the target partition, then performs a classic
 * NAND read-modify-write one erase block at a time: read the existing
 * block into a kmalloc'd holding buffer when the write is unaligned or
 * shorter than a block, merge the caller's data, erase the physical block
 * (hndnand_erase), and write the holding block back (hndnand_write).  A
 * failed erase or write marks the block bad (hndnand_mark_badb, map[]=1)
 * and advances skip_bytes so the data is retried on the next good block.
 * Block-index arithmetic uses reciprocal_value()/reciprocal_divide() to
 * avoid runtime division.
 *
 * NOTE(review): garbled extraction -- braces, the outer per-block loop,
 * retry/continue control flow, kfree(block), *retlen update and return
 * statements are missing from this view; code tokens are preserved
 * byte-for-byte.  Confirm control flow against the original source.
 */
242 nflash_mtd_write(struct mtd_info
*mtd
, loff_t to
, size_t len
, size_t *retlen
, const u_char
*buf
)
244 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
246 struct mtd_partition
*part
= NULL
;
247 u_char
*block
= NULL
;
248 u_char
*ptr
= (u_char
*)buf
;
249 uint offset
, blocksize
, mask
, blk_offset
, off
;
250 uint skip_bytes
= 0, good_bytes
= 0;
252 int read_len
, write_len
, copy_len
= 0;
256 uint r_blocksize
, part_blk_start
, part_blk_end
;
258 /* Locate the part */
259 for (i
= 0; nflash_parts
[i
].name
; i
++) {
260 if (to
>= nflash_parts
[i
].offset
&&
261 ((nflash_parts
[i
+1].name
== NULL
) ||
262 (to
< (nflash_parts
[i
].offset
+ nflash_parts
[i
].size
)))) {
263 part
= &nflash_parts
[i
];
269 /* Check address range */
272 if ((to
+ len
) > (part
->offset
+ part
->size
))
/* Holding buffer for one erase block (read-modify-write unit). */
275 blocksize
= mtd
->erasesize
;
276 r_blocksize
= reciprocal_value(blocksize
);
278 if (!(block
= kmalloc(blocksize
, GFP_KERNEL
)))
283 mask
= blocksize
- 1;
284 /* Check and skip bad blocks */
285 blk_offset
= offset
& ~mask
;
286 good_bytes
= part
->offset
& ~mask
;
287 part_blk_start
= reciprocal_divide(good_bytes
, r_blocksize
);
288 part_blk_end
= reciprocal_divide(part
->offset
+ part
->size
, r_blocksize
);
290 for (blk_idx
= part_blk_start
; blk_idx
< part_blk_end
; blk_idx
++) {
291 if (nflash
->map
[blk_idx
] != 0) {
292 skip_bytes
+= blocksize
;
294 if (good_bytes
== blk_offset
)
296 good_bytes
+= blocksize
;
/* No good block in the partition for this offset -- error path (not visible). */
299 if (blk_idx
== part_blk_end
) {
303 blk_offset
= blocksize
* blk_idx
;
304 /* Backup and erase one block at a time */
309 from
= offset
& ~mask
;
310 /* Copy existing data into holding block if necessary */
311 if (((offset
& (blocksize
-1)) != 0) || (len
< blocksize
)) {
312 ret
= _nflash_mtd_read(mtd
, part
, from
, blocksize
,
316 if (read_len
!= blocksize
) {
321 /* Copy input data into holding block */
322 copy_len
= min(len
, blocksize
- (offset
& mask
));
323 memcpy(block
+ (offset
& mask
), ptr
, copy_len
);
/* Erase the physical block; on failure mark it bad and skip past it. */
325 off
= (uint
) from
+ skip_bytes
;
327 if ((ret
= hndnand_erase(nflash
->nfl
, off
)) < 0) {
328 hndnand_mark_badb(nflash
->nfl
, off
);
329 nflash
->map
[blk_idx
] = 1;
330 skip_bytes
+= blocksize
;
334 /* Write holding block */
336 write_len
= blocksize
;
338 if ((bytes
= hndnand_write(nflash
->nfl
,
339 from
+ skip_bytes
, (uint
) write_len
,
340 (uchar
*) write_ptr
)) < 0) {
341 hndnand_mark_badb(nflash
->nfl
, off
);
342 nflash
->map
[blk_idx
] = 1;
343 skip_bytes
+= blocksize
;
359 /* Check and skip bad blocks */
361 blk_offset
+= blocksize
;
363 while ((nflash
->map
[blk_idx
] != 0) &&
364 (blk_offset
< (part
->offset
+part
->size
))) {
365 skip_bytes
+= blocksize
;
366 blk_offset
+= blocksize
;
/* Ran off the end of the partition while skipping bad blocks (not visible). */
369 if (blk_offset
>= (part
->offset
+part
->size
)) {
376 NFLASH_UNLOCK(nflash
);
/*
 * MTD .erase entry point.
 *
 * Visible logic: validates that the request is block-aligned and inside
 * one partition, translates the logical erase address into a physical one
 * by walking past blocks marked bad in nflash->map, then erases
 * erase_blknum blocks with hndnand_erase(), marking any block that fails
 * as bad (hndnand_mark_badb, map[]=1).  Block arithmetic uses
 * reciprocal_value()/reciprocal_divide() instead of runtime division.
 * Finally sets erase->state (MTD_ERASE_FAILED / MTD_ERASE_DONE), drops the
 * controller lock and invokes erase->callback (pre-3.x MTD convention).
 *
 * NOTE(review): garbled extraction -- the NFLASH_LOCK call, braces, the
 * addr/len initialisation from erase->addr/erase->len, loop decrements and
 * return statements are missing from this view; code tokens are preserved
 * byte-for-byte.  Confirm control flow against the original source.
 */
384 nflash_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*erase
)
386 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
387 struct mtd_partition
*part
= NULL
;
389 uint addr
, len
, blocksize
;
390 uint part_start_blk
, part_end_blk
;
391 uint blknum
, new_addr
, erase_blknum
;
392 uint reciprocal_blocksize
;
397 blocksize
= mtd
->erasesize
;
398 reciprocal_blocksize
= reciprocal_value(blocksize
);
400 /* Check address range */
404 if ((addr
+ len
) > mtd
->size
)
407 if (addr
& (blocksize
- 1))
410 /* Locate the part */
411 for (i
= 0; nflash_parts
[i
].name
; i
++) {
412 if (addr
>= nflash_parts
[i
].offset
&&
413 ((addr
+ len
) <= (nflash_parts
[i
].offset
+ nflash_parts
[i
].size
))) {
414 part
= &nflash_parts
[i
];
424 /* Find the effective start block address to erase */
425 part_start_blk
= reciprocal_divide(part
->offset
& ~(blocksize
-1),
426 reciprocal_blocksize
);
427 part_end_blk
= reciprocal_divide(((part
->offset
+ part
->size
) + (blocksize
-1)),
428 reciprocal_blocksize
);
430 new_addr
= part_start_blk
* blocksize
;
431 /* The block number to be skipped relative to the start address of
434 blknum
= reciprocal_divide(addr
- new_addr
, reciprocal_blocksize
);
/* Advance new_addr past bad blocks until blknum good blocks are consumed. */
436 for (i
= part_start_blk
; (i
< part_end_blk
) && (blknum
> 0); i
++) {
437 if (nflash
->map
[i
] != 0) {
438 new_addr
+= blocksize
;
440 new_addr
+= blocksize
;
445 /* Erase the blocks from the new block address */
446 erase_blknum
= reciprocal_divide(len
+ (blocksize
-1), reciprocal_blocksize
);
/* Would run past the end of the partition -- error path (not visible). */
448 if ((new_addr
+ (erase_blknum
* blocksize
)) > (part
->offset
+ part
->size
)) {
453 for (i
= new_addr
; erase_blknum
; i
+= blocksize
) {
454 /* Skip bad block erase */
455 uint j
= reciprocal_divide(i
, reciprocal_blocksize
);
456 if (nflash
->map
[j
] != 0) {
/* Erase failure: remember the block as bad for future skip logic. */
460 if ((ret
= hndnand_erase(nflash
->nfl
, i
)) < 0) {
461 hndnand_mark_badb(nflash
->nfl
, i
);
462 nflash
->map
[i
/ blocksize
] = 1;
469 /* Set erase status */
471 erase
->state
= MTD_ERASE_FAILED
;
473 erase
->state
= MTD_ERASE_DONE
;
475 NFLASH_UNLOCK(nflash
);
477 /* Call erase callback */
479 erase
->callback(erase
);
/*
 * Pre-2.2.18 kernels had no module_init/module_exit; alias the entry
 * points to the legacy init_module/cleanup_module names instead.
 * NOTE(review): the closing #endif is missing from this garbled view.
 */
484 #if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
485 #define nflash_mtd_init init_module
486 #define nflash_mtd_exit cleanup_module
/*
 * Module init: attach to the SiliconBackplane (si_kattach), probe the NAND
 * controller (hndnand_init), describe the chip to MTD (one erase region,
 * size capped at 2GB), allocate the per-block bad-block map, register the
 * mtd_info callbacks, scan for factory bad blocks under the controller
 * lock, and finally build/register the partition table.
 *
 * NOTE(review): garbled extraction -- braces, error-path returns, #endif
 * lines, the kmalloc failure check for nflash.map (original line 526 is
 * missing and may contain it -- TODO confirm) and the final return are not
 * visible here; code tokens preserved byte-for-byte.
 */
490 nflash_mtd_init(void)
494 #ifdef CONFIG_MTD_PARTITIONS
495 struct mtd_partition
*parts
;
499 memset(&nflash
, 0, sizeof(struct nflash_mtd
));
501 /* attach to the backplane */
502 if (!(nflash
.sih
= si_kattach(SI_OSH
))) {
503 printk(KERN_ERR
"nflash: error attaching to backplane\n");
508 /* Initialize serial flash access */
509 if (!(info
= hndnand_init(nflash
.sih
))) {
510 printk(KERN_ERR
"nflash: found no supported devices\n");
516 /* Setup region info */
517 nflash
.region
.offset
= 0;
518 nflash
.region
.erasesize
= info
->blocksize
;
519 nflash
.region
.numblocks
= info
->numblocks
;
520 if (nflash
.region
.erasesize
> nflash
.mtd
.erasesize
)
521 nflash
.mtd
.erasesize
= nflash
.region
.erasesize
;
522 /* At most 2GB is supported */
523 nflash
.mtd
.size
= (info
->size
>= (1 << 11)) ? (1 << 31) : (info
->size
<< 20);
524 nflash
.mtd
.numeraseregions
= 1;
/* One byte per erase block: 0 = good, 1 = bad (see map[] updates above). */
525 nflash
.map
= (unsigned char *)kmalloc(info
->numblocks
, GFP_KERNEL
);
527 memset(nflash
.map
, 0, info
->numblocks
);
529 /* Register with MTD */
530 nflash
.mtd
.name
= "nflash";
531 nflash
.mtd
.type
= MTD_NANDFLASH
;
532 nflash
.mtd
.flags
= MTD_CAP_NANDFLASH
;
533 nflash
.mtd
.eraseregions
= &nflash
.region
;
534 nflash
.mtd
.erase
= nflash_mtd_erase
;
535 nflash
.mtd
.read
= nflash_mtd_read
;
536 nflash
.mtd
.write
= nflash_mtd_write
;
537 nflash
.mtd
.writesize
= info
->pagesize
;
538 nflash
.mtd
.priv
= &nflash
;
539 nflash
.mtd
.owner
= THIS_MODULE
;
540 nflash
.controller
= nand_hwcontrol_lock_init();
541 if (!nflash
.controller
)
/* Scan every block for factory bad-block marks while holding the controller. */
545 NFLASH_LOCK(&nflash
);
546 for (i
= 0; i
< info
->numblocks
; i
++) {
547 if (hndnand_checkbadb(nflash
.nfl
, (i
* info
->blocksize
)) != 0) {
551 NFLASH_UNLOCK(&nflash
);
553 #ifdef CONFIG_MTD_PARTITIONS
554 parts
= init_nflash_mtd_partitions(info
, &nflash
.mtd
, nflash
.mtd
.size
);
/* Count the NULL-name-terminated partition array, then register it. */
558 for (i
= 0; parts
[i
].name
; i
++)
561 ret
= add_mtd_partitions(&nflash
.mtd
, parts
, i
);
563 printk(KERN_ERR
"nflash: add_mtd failed\n");
566 nflash_parts
= parts
;
/*
 * Module exit: unregister the partitions (when CONFIG_MTD_PARTITIONS) or
 * the bare MTD device.  NOTE(review): garbled extraction -- return type,
 * braces, #else/#endif and any kfree of nflash.map are missing from this
 * view; code tokens preserved byte-for-byte.
 */
575 nflash_mtd_exit(void)
577 #ifdef CONFIG_MTD_PARTITIONS
578 del_mtd_partitions(&nflash
.mtd
);
580 del_mtd_device(&nflash
.mtd
);
/* Register the module entry/exit points with the kernel. */
584 module_init(nflash_mtd_init
);
585 module_exit(nflash_mtd_exit
);