2 * Broadcom SiliconBackplane chipcommon serial flash interface
4 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 #include <linux/config.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/ioport.h>
25 #include <linux/mtd/compatmac.h>
26 #include <linux/mtd/mtd.h>
27 #include <linux/mtd/partitions.h>
28 #include <linux/errno.h>
29 #include <linux/pci.h>
30 #include <linux/delay.h>
45 #ifdef CONFIG_MTD_PARTITIONS
/* Partition-table builder, defined elsewhere in this tree.  The table it
 * returns is terminated by an entry whose .name is NULL — the lookup loops
 * in the read/write/erase paths below rely on that sentinel. */
46 extern struct mtd_partition
* init_nflash_mtd_partitions(struct mtd_info
*mtd
, size_t size
);
/* Cached pointer to the partition table; assigned in nflash_mtd_init(). */
48 struct mtd_partition
*nflash_parts
;
/* Returns the mutex installed as nflash.mtd.mutex; every MTD op below
 * takes it around flash access. */
51 extern struct mutex
*partitions_mutex_init(void);
/* NOTE(review): this member line belongs to a struct definition whose
 * opening is not visible in this chunk (presumably struct nflash_mtd);
 * it is the single erase-region descriptor published via
 * mtd.eraseregions in nflash_mtd_init(). */
57 struct mtd_erase_region_info region
;
61 /* Private global state */
/* One device instance only — this driver supports a single nflash part. */
62 static struct nflash_mtd nflash
;
/*
 * _nflash_mtd_read - core read path shared by nflash_mtd_read() and the
 * read-modify-write sequence in nflash_mtd_write().
 *
 * Reads 'len' bytes at logical offset 'from' into 'buf', transparently
 * skipping erase blocks that are bad (either already recorded in
 * nflash->map[] or reported by nflash_checkbadb()).  The transfer is done
 * in NFL_SECTOR_SIZE units through a temporary buffer so that offsets and
 * lengths that are not sector-aligned can still be serviced; the final
 * memcpy() trims the unaligned head ('extra') back into the caller's buf.
 *
 * @mtd:    MTD device (its ->priv holds the struct nflash_mtd)
 * @part:   partition hint; when NULL it is looked up from 'from'
 * @from:   logical read offset
 * @len:    number of bytes requested
 * @retlen: out — bytes actually copied to 'buf'
 * @buf:    destination buffer
 *
 * Caller must hold mtd->mutex (see nflash_mtd_read()).
 *
 * NOTE(review): this chunk is missing several original lines (declarations
 * of i/blk_idx/extra/size/tmpbuf/ptr/bytes, the error returns, the kfree of
 * tmpbuf, and the function's return) — comments below describe only what
 * is visible; confirm against the full source.
 */
65 _nflash_mtd_read(struct mtd_info
*mtd
, struct mtd_partition
*part
,
66 loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
68 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
73 uint offset
, blocksize
, mask
, blk_offset
, off
;
74 uint skip_bytes
= 0, good_bytes
= 0;
/* Locate the partition containing 'from' (NULL-name entry terminates the
 * table).  NOTE(review): this uses the NEXT entry's .offset as the upper
 * bound, while nflash_mtd_write() uses offset+size of the CURRENT entry —
 * the two only agree when partitions are contiguous; verify intent. */
81 for (i
= 0; nflash_parts
[i
].name
; i
++) {
82 if (from
>= nflash_parts
[i
].offset
&&
83 ((nflash_parts
[i
+1].name
== NULL
) || (from
< nflash_parts
[i
+1].offset
))) {
84 part
= &nflash_parts
[i
];
91 /* Check address range */
/* Reject reads that run past the end of the device (the failing branch's
 * body is in lines not visible here). */
94 if ((from
+ len
) > mtd
->size
)
/* Unaligned start: remember how many leading bytes of the first sector
 * do not belong to the caller ('extra' is declared in missing lines). */
97 if ((offset
& (NFL_SECTOR_SIZE
- 1)) != 0) {
98 extra
= offset
& (NFL_SECTOR_SIZE
- 1);
/* Round the transfer size up to a whole number of sectors. */
103 size
= (len
+ (NFL_SECTOR_SIZE
- 1)) & ~(NFL_SECTOR_SIZE
- 1);
/* Bounce buffer for the sector-aligned transfer.  NOTE(review): no NULL
 * check is visible here — it may exist in the missing lines; confirm. */
109 tmpbuf
= (uchar
*)kmalloc(size
, GFP_KERNEL
);
113 blocksize
= mtd
->erasesize
;
114 mask
= blocksize
- 1;
/* blk_offset: erase block holding 'offset'; good_bytes: scan cursor that
 * starts at the partition's first (block-aligned) byte. */
115 blk_offset
= offset
& ~mask
;
116 good_bytes
= part
->offset
& ~mask
;
117 /* Check and skip bad blocks */
/* Walk blocks from the partition start, accumulating skip_bytes for each
 * bad block encountered before the target block, and caching badness in
 * nflash->map[].  The loop-exit logic around lines 123-128 is partially
 * missing; presumably it stops once good_bytes reaches blk_offset. */
118 for (blk_idx
= good_bytes
/blocksize
; blk_idx
< mtd
->eraseregions
->numblocks
; blk_idx
++) {
119 if ((nflash
->map
[blk_idx
] != 0) ||
120 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, (blocksize
*blk_idx
)) != 0)) {
121 skip_bytes
+= blocksize
;
122 nflash
->map
[blk_idx
] = 1;
124 if (good_bytes
== blk_offset
)
126 good_bytes
+= blocksize
;
/* Scanned off the end of the device without settling on a block —
 * error path (body not visible in this chunk). */
129 if (blk_idx
== mtd
->eraseregions
->numblocks
) {
133 blk_offset
= blocksize
* blk_idx
;
/* Physical offset = logical offset + total bytes skipped for bad blocks. */
136 off
= offset
+ skip_bytes
;
138 /* Check and skip bad blocks */
/* When the adjusted offset crosses into the next erase block, keep
 * advancing past any further bad blocks before reading. */
139 if (off
>= (blk_offset
+ blocksize
)) {
140 blk_offset
+= blocksize
;
/* NOTE(review): blk_idx does not visibly advance inside this loop even
 * though blk_offset does — the increment may be in the missing lines;
 * otherwise map[] would be re-marking the same index.  Confirm. */
142 while (((nflash
->map
[blk_idx
] != 0) ||
143 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, blk_offset
) != 0)) &&
144 (blk_offset
< mtd
->size
)) {
145 skip_bytes
+= blocksize
;
146 nflash
->map
[blk_idx
] = 1;
147 blk_offset
+= blocksize
;
/* Ran past the end of flash while skipping bad blocks — error path
 * (body not visible). */
150 if (blk_offset
>= mtd
->size
) {
154 off
= offset
+ skip_bytes
;
/* One sector per iteration into tmpbuf (ptr presumably walks tmpbuf;
 * its declaration/advance is in missing lines); negative return from
 * nflash_read() is the error path. */
157 if ((bytes
= nflash_read(nflash
->sih
, nflash
->cc
, off
, NFL_SECTOR_SIZE
, ptr
)) < 0) {
/* Drop the unaligned head and hand exactly *retlen bytes to the caller. */
172 memcpy(buf
, tmpbuf
+extra
, *retlen
);
/*
 * nflash_mtd_read - MTD .read entry point (installed in nflash_mtd_init()).
 *
 * Thin locking wrapper: takes the device mutex and delegates to
 * _nflash_mtd_read() with no partition hint (part == NULL), so the core
 * routine performs its own partition lookup from 'from'.
 *
 * NOTE(review): the declaration of 'ret' and the final 'return ret;' fall
 * in original lines not visible in this chunk.
 */
179 nflash_mtd_read(struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
183 mutex_lock(mtd
->mutex
);
184 ret
= _nflash_mtd_read(mtd
, NULL
, from
, len
, retlen
, buf
);
185 mutex_unlock(mtd
->mutex
);
/*
 * nflash_mtd_write - MTD .write entry point (installed in nflash_mtd_init()).
 *
 * Writes 'len' bytes at 'to' using a read-modify-write cycle per erase
 * block: back up the affected block via _nflash_mtd_read(), merge the
 * caller's data into the holding buffer, erase, then rewrite.  Bad blocks
 * are skipped (and newly-failing blocks marked bad via nflash_mark_badb()
 * and cached in nflash->map[]), mirroring the skip logic in the read path.
 *
 * @mtd:    MTD device (->priv holds struct nflash_mtd)
 * @to:     logical write offset
 * @len:    number of bytes to write
 * @retlen: out — bytes written (updated in original lines not visible here)
 * @buf:    source data
 *
 * NOTE(review): many original lines are missing from this chunk
 * (declarations of i/blk_idx/bytes/ret/write_ptr, the outer per-block loop
 * header, error returns, kfree(block), retry/continue control flow after
 * erase/write failures, and the function's return).  Comments describe
 * only what is visible; confirm details against the full source.
 */
191 nflash_mtd_write(struct mtd_info
*mtd
, loff_t to
, size_t len
, size_t *retlen
, const u_char
*buf
)
193 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
195 struct mtd_partition
*part
= NULL
;
/* Holding buffer for one erase block during read-modify-write. */
196 u_char
*block
= NULL
;
/* Cursor into the caller's data (const cast away for the write API). */
197 u_char
*ptr
= (u_char
*)buf
;
198 uint offset
, blocksize
, mask
, blk_offset
, off
;
199 uint skip_bytes
= 0, good_bytes
= 0;
201 int read_len
, write_len
, copy_len
= 0;
206 /* Locate the part */
/* NOTE(review): the upper-bound test here uses the CURRENT entry's
 * offset+size, whereas _nflash_mtd_read() uses the NEXT entry's offset —
 * the two disagree for non-contiguous partition tables; verify intent. */
207 for (i
= 0; nflash_parts
[i
].name
; i
++) {
208 if (to
>= nflash_parts
[i
].offset
&&
209 ((nflash_parts
[i
+1].name
== NULL
) ||
210 (to
< (nflash_parts
[i
].offset
+ nflash_parts
[i
].size
)))) {
211 part
= &nflash_parts
[i
];
217 /* Check address range */
/* Writes must stay inside the located partition (failure branch body is
 * in lines not visible here). */
220 if ((to
+ len
) > (part
->offset
+ part
->size
))
223 blocksize
= mtd
->erasesize
;
/* One-erase-block bounce buffer; allocation failure bails out (the return
 * taken here is in a missing line). */
224 if (!(block
= kmalloc(blocksize
, GFP_KERNEL
)))
/* Serialize against concurrent read/write/erase on this device. */
227 mutex_lock(mtd
->mutex
);
229 mask
= blocksize
- 1;
230 /* Check and skip bad blocks */
/* Same bad-block pre-scan as the read path, but bounded by the partition
 * end rather than the whole device. */
231 blk_offset
= offset
& ~mask
;
232 good_bytes
= part
->offset
& ~mask
;
233 for (blk_idx
= good_bytes
/blocksize
; blk_idx
< (part
->offset
+part
->size
)/blocksize
;
235 if ((nflash
->map
[blk_idx
] != 0) ||
236 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, (blocksize
*blk_idx
)) != 0)) {
237 skip_bytes
+= blocksize
;
238 nflash
->map
[blk_idx
] = 1;
240 if (good_bytes
== blk_offset
)
242 good_bytes
+= blocksize
;
/* Exhausted the partition while skipping bad blocks — error path (body
 * not visible). */
245 if (blk_idx
== (part
->offset
+part
->size
)/blocksize
) {
249 blk_offset
= blocksize
* blk_idx
;
250 /* Backup and erase one block at a time */
/* 'from' is the block-aligned logical start of the block being rewritten. */
255 from
= offset
& ~mask
;
256 /* Copy existing data into holding block if necessary */
/* Partial-block update: pre-fill the holding buffer with the block's
 * current contents so untouched bytes survive the erase. */
257 if (((offset
& (blocksize
-1)) != 0) || (len
< blocksize
)) {
258 ret
= _nflash_mtd_read(mtd
, part
, from
, blocksize
,
/* Short read of the backup block is fatal for this write (error body in
 * missing lines). */
262 if (read_len
!= blocksize
) {
267 /* Copy input data into holding block */
/* Merge at most one block's worth of caller data at the in-block offset. */
268 copy_len
= min(len
, blocksize
- (offset
& mask
));
269 memcpy(block
+ (offset
& mask
), ptr
, copy_len
);
/* Physical block address = logical address + accumulated bad-block skip. */
271 off
= (uint
) from
+ skip_bytes
;
/* Erase failed: mark the block bad, remember it in map[], and bump
 * skip_bytes (the retry/continue that presumably follows is in missing
 * lines — NOTE(review): confirm the block is retried at the next
 * physical block rather than silently dropped). */
273 if ((ret
= nflash_erase(nflash
->sih
, nflash
->cc
, off
)) < 0) {
274 nflash_mark_badb(nflash
->sih
, nflash
->cc
, off
);
275 nflash
->map
[blk_idx
] = 1;
276 skip_bytes
+= blocksize
;
280 /* Write holding block */
282 write_len
= blocksize
;
/* write_ptr is set up in missing lines (presumably 'block' or 'ptr'
 * depending on whether the holding buffer was needed — confirm). */
284 if ((bytes
= nflash_write(nflash
->sih
, nflash
->cc
,
285 (uint
) from
+ skip_bytes
, (uint
) write_len
,
286 (uchar
*) write_ptr
)) < 0) {
/* Write failed: same bad-block bookkeeping as the erase-failure path. */
287 nflash_mark_badb(nflash
->sih
, nflash
->cc
, off
);
288 nflash
->map
[blk_idx
] = 1;
289 skip_bytes
+= blocksize
;
305 /* Check and skip bad blocks */
/* Advance past any bad blocks before the next iteration's block. */
307 blk_offset
+= blocksize
;
/* NOTE(review): as in the read path, blk_idx does not visibly advance in
 * this loop even though blk_offset does — confirm against full source. */
309 while (((nflash
->map
[blk_idx
] != 0) ||
310 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, blk_offset
) != 0)) &&
311 (blk_offset
< (part
->offset
+part
->size
))) {
312 skip_bytes
+= blocksize
;
313 nflash
->map
[blk_idx
] = 1;
314 blk_offset
+= blocksize
;
/* Ran off the end of the partition while skipping — error path (body not
 * visible). */
317 if (blk_offset
>= (part
->offset
+part
->size
)) {
/* Release the device lock (kfree(block) and the return are in missing
 * lines). */
324 mutex_unlock(mtd
->mutex
);
/*
 * nflash_mtd_erase - MTD .erase entry point (installed in nflash_mtd_init()).
 *
 * Erases erase->len bytes starting at erase->addr, translated past any bad
 * blocks within the owning partition.  Sets erase->state to
 * MTD_ERASE_DONE / MTD_ERASE_FAILED and invokes the caller's completion
 * callback.
 *
 * NOTE(review): this chunk omits several original lines (declarations of
 * i/ret, extraction of addr/len from *erase, the error returns for the
 * range/alignment/partition checks, loop bodies around lines 380-393, and
 * the final return).  Comments describe only what is visible.
 */
332 nflash_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*erase
)
334 struct nflash_mtd
*nflash
= (struct nflash_mtd
*) mtd
->priv
;
335 struct mtd_partition
*part
= NULL
;
337 uint addr
, len
, blocksize
;
338 uint part_start_blk
, part_end_blk
;
/* blknum: requested block index relative to the partition start;
 * new_addr: bad-block-adjusted physical start; erase_blknum: block count. */
339 uint blknum
, new_addr
, erase_blknum
;
344 blocksize
= mtd
->erasesize
;
346 /* Check address range */
/* Reject ranges past the device end (error body in missing lines). */
350 if ((addr
+ len
) > mtd
->size
)
/* Erase must start on an erase-block boundary. */
353 if (addr
& (blocksize
- 1))
356 /* Locate the part */
/* Unlike the write path, the WHOLE range [addr, addr+len) must fit inside
 * a single partition here. */
357 for (i
= 0; nflash_parts
[i
].name
; i
++) {
358 if (addr
>= nflash_parts
[i
].offset
&&
359 ((addr
+ len
) <= (nflash_parts
[i
].offset
+ nflash_parts
[i
].size
))) {
360 part
= &nflash_parts
[i
];
/* Serialize against concurrent read/write/erase. */
368 mutex_unlock is paired below */
368 mutex_lock(mtd
->mutex
);
370 /* Find the effective start block address to erase */
371 part_start_blk
= (part
->offset
& ~(blocksize
- 1)) / blocksize
;
372 part_end_blk
= ROUNDUP((part
->offset
+ part
->size
), blocksize
) / blocksize
;
374 new_addr
= part_start_blk
* blocksize
;
375 blknum
= (addr
/ blocksize
) - part_start_blk
;
/* Advance new_addr block-by-block from the partition start until blknum
 * good blocks have been passed over (decrement/termination of blknum is
 * in lines not visible here); bad blocks move new_addr without consuming
 * a requested block. */
377 for (i
= part_start_blk
; i
< part_end_blk
; i
++) {
378 if ((nflash
->map
[i
] != 0) ||
379 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, i
* blocksize
) != 0)) {
381 new_addr
+= blocksize
;
385 new_addr
+= blocksize
;
394 /* Erase the blocks from the new block address */
395 erase_blknum
= ROUNDUP(len
, blocksize
) / blocksize
;
/* The adjusted range must still fit inside the partition (error body in
 * missing lines). */
398 if ((new_addr
+ (erase_blknum
* blocksize
)) > (part
->offset
+ part
->size
)) {
/* erase_blknum is presumably decremented per successful erase in missing
 * lines — NOTE(review): confirm, else this loop would not terminate. */
404 for (i
= new_addr
; erase_blknum
; i
+= blocksize
) {
405 if (i
>= (part
->offset
+ part
->size
)) {
406 /* It indicates bad block, but do NOT treat it as fail */
407 printk(KERN_WARNING
"%s(%d):end of part %s (0x%x)!\n",
408 __FUNCTION__
, __LINE__
, part
->name
, i
);
413 /* Skip bad block erase */
414 if ((nflash
->map
[i
/ blocksize
] != 0) ||
415 (nflash_checkbadb(nflash
->sih
, nflash
->cc
, i
) != 0)) {
416 nflash
->map
[i
/ blocksize
] = 1;
/* Hard erase failure: mark the block bad and record it; whether this
 * aborts or continues is decided in missing lines. */
420 if ((ret
= nflash_erase(nflash
->sih
, nflash
->cc
, i
)) < 0) {
421 nflash_mark_badb(nflash
->sih
, nflash
->cc
, i
);
422 nflash
->map
[i
/ blocksize
] = 1;
429 /* Set erase status */
/* The condition selecting between the two states is in a missing line
 * (presumably 'if (ret)'). */
431 erase
->state
= MTD_ERASE_FAILED
;
433 erase
->state
= MTD_ERASE_DONE
;
435 mutex_unlock(mtd
->mutex
);
437 /* Call erase callback */
/* NOTE(review): a NULL-callback guard (line 438) is not visible here —
 * confirm it exists before this unconditional-looking call. */
439 erase
->callback(erase
);
444 #if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
/* Kernels older than 2.2.18 (0x20212) lack module_init()/module_exit();
 * alias our entry points to the legacy init_module/cleanup_module names. */
445 #define nflash_mtd_init init_module
446 #define nflash_mtd_exit cleanup_module
/*
 * nflash_mtd_init - module entry point.
 *
 * Locates the chipcommon core as a PCI device on bus 0, attaches to the
 * SiliconBackplane, maps the core's registers, probes the NAND flash,
 * fills in the struct mtd_info (single erase region, NAND type/caps,
 * read/write/erase ops), pre-scans for factory bad blocks, and registers
 * the partitions produced by init_nflash_mtd_partitions().
 *
 * NOTE(review): this chunk omits many original lines (declarations of
 * i/ret/info, the 'goto fail'-style error transfers, the 'return 0'
 * success path, and labels around lines 543-552) — the trailing
 * iounmap()/si_detach() lines visible at the bottom are presumably the
 * error-unwind path, not the success path.  Confirm against full source.
 */
450 nflash_mtd_init(void)
454 struct pci_dev
*dev
= NULL
;
455 #ifdef CONFIG_MTD_PARTITIONS
456 struct mtd_partition
*parts
;
/* Scan bus 0 for the chipcommon core (matched by PCI device ID). */
460 list_for_each_entry(dev
, &((pci_find_bus(0, 0))->devices
), bus_list
) {
461 if ((dev
!= NULL
) && (dev
->device
== CC_CORE_ID
))
466 printk(KERN_ERR
"nflash: chipcommon not found\n");
470 memset(&nflash
, 0, sizeof(struct nflash_mtd
));
472 /* attach to the backplane */
473 if (!(nflash
.sih
= si_kattach(SI_OSH
))) {
474 printk(KERN_ERR
"nflash: error attaching to backplane\n");
479 /* Map registers and flash base */
480 if (!(nflash
.cc
= ioremap_nocache(
481 pci_resource_start(dev
, 0),
482 pci_resource_len(dev
, 0)))) {
483 printk(KERN_ERR
"nflash: error mapping registers\n");
488 /* Initialize serial flash access */
489 if (!(info
= nflash_init(nflash
.sih
, nflash
.cc
))) {
490 printk(KERN_ERR
"nflash: found no supported devices\n");
495 /* Setup region info */
/* Single erase region spanning the whole chip. */
496 nflash
.region
.offset
= 0;
497 nflash
.region
.erasesize
= info
->blocksize
;
498 nflash
.region
.numblocks
= info
->numblocks
;
499 if (nflash
.region
.erasesize
> nflash
.mtd
.erasesize
)
500 nflash
.mtd
.erasesize
= nflash
.region
.erasesize
;
501 /* At most 2GB is supported */
/* NOTE(review): '1 << 31' overflows a 32-bit signed int (UB in C); should
 * be '1U << 31' or a 64-bit constant.  info->size is in MB here (<< 20). */
502 nflash
.mtd
.size
= (info
->size
>= (1 << 11)) ? (1 << 31) : (info
->size
<< 20);
503 nflash
.mtd
.numeraseregions
= 1;
/* One byte per erase block: nonzero means "known bad".
 * NOTE(review): no NULL check is visible between this kmalloc and the
 * memset below — confirm one exists in the missing line 505. */
504 nflash
.map
= (unsigned char *)kmalloc(info
->numblocks
, GFP_KERNEL
);
506 memset(nflash
.map
, 0, info
->numblocks
);
508 /* Register with MTD */
509 nflash
.mtd
.name
= "nflash";
510 nflash
.mtd
.type
= MTD_NANDFLASH
;
511 nflash
.mtd
.flags
= MTD_CAP_NANDFLASH
;
512 nflash
.mtd
.eraseregions
= &nflash
.region
;
513 nflash
.mtd
.erase
= nflash_mtd_erase
;
514 nflash
.mtd
.read
= nflash_mtd_read
;
515 nflash
.mtd
.write
= nflash_mtd_write
;
516 nflash
.mtd
.writesize
= NFL_SECTOR_SIZE
;
517 nflash
.mtd
.priv
= &nflash
;
518 nflash
.mtd
.owner
= THIS_MODULE
;
/* Shared lock used by all the MTD ops above. */
519 nflash
.mtd
.mutex
= partitions_mutex_init();
520 if (!nflash
.mtd
.mutex
)
/* Pre-scan every block so factory bad blocks are cached in nflash.map
 * before the first I/O (the map[i] = 1 assignment is in missing lines). */
524 mutex_lock(nflash
.mtd
.mutex
);
525 for (i
= 0; i
< info
->numblocks
; i
++) {
526 if (nflash_checkbadb(nflash
.sih
, nflash
.cc
, (i
* info
->blocksize
)) != 0) {
530 mutex_unlock(nflash
.mtd
.mutex
);
532 #ifdef CONFIG_MTD_PARTITIONS
533 parts
= init_nflash_mtd_partitions(&nflash
.mtd
, nflash
.mtd
.size
);
/* Count entries up to the NULL-name sentinel, then register them. */
536 for (i
= 0; parts
[i
].name
; i
++);
537 ret
= add_mtd_partitions(&nflash
.mtd
, parts
, i
);
539 printk(KERN_ERR
"nflash: add_mtd failed\n");
/* Publish the table for the partition-lookup loops in read/write/erase. */
542 nflash_parts
= parts
;
/* Error-unwind: undo the ioremap and backplane attach (labels/gotos are
 * in the missing lines). */
548 iounmap((void *) nflash
.cc
);
550 si_detach(nflash
.sih
);
/*
 * nflash_mtd_exit - module teardown, reverse order of nflash_mtd_init():
 * unregister the MTD partitions (or the bare device when partitions are
 * disabled), unmap the chipcommon registers, and detach from the
 * backplane.  NOTE(review): no kfree(nflash.map) is visible in this chunk
 * — confirm whether the block map is freed in a missing line or leaked.
 */
555 nflash_mtd_exit(void)
557 #ifdef CONFIG_MTD_PARTITIONS
558 del_mtd_partitions(&nflash
.mtd
);
/* Non-partitioned build: the device itself was registered instead. */
560 del_mtd_device(&nflash
.mtd
);
562 iounmap((void *) nflash
.cc
);
563 si_detach(nflash
.sih
);
/* Standard module entry/exit registration (or the pre-2.2.18 aliases
 * defined above when building for an old kernel). */
566 module_init(nflash_mtd_init
);
567 module_exit(nflash_mtd_exit
);