/*
 * Copyright © 2009 - Maxim Levitsky
 * SmartMedia/xD translation layer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/hdreg.h>
14 #include <linux/kthread.h>
15 #include <linux/freezer.h>
16 #include <linux/sysfs.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include <linux/mtd/nand_ecc.h>
20 #include "nand/sm_common.h"
/* Workqueue on which the deferred cache flush work is run. */
struct workqueue_struct *cache_flush_workqueue;
27 static int cache_timeout
= 1000;
28 module_param(cache_timeout
, bool, S_IRUGO
);
29 MODULE_PARM_DESC(cache_timeout
,
30 "Timeout (in ms) for cache flush (1000 ms default");
33 module_param(debug
, int, S_IRUGO
| S_IWUSR
);
34 MODULE_PARM_DESC(debug
, "Debug level (0-2)");
37 /* ------------------- sysfs attributtes ---------------------------------- */
38 struct sm_sysfs_attribute
{
39 struct device_attribute dev_attr
;
44 ssize_t
sm_attr_show(struct device
*dev
, struct device_attribute
*attr
,
47 struct sm_sysfs_attribute
*sm_attr
=
48 container_of(attr
, struct sm_sysfs_attribute
, dev_attr
);
50 strncpy(buf
, sm_attr
->data
, sm_attr
->len
);
55 #define NUM_ATTRIBUTES 1
56 #define SM_CIS_VENDOR_OFFSET 0x59
57 struct attribute_group
*sm_create_sysfs_attributes(struct sm_ftl
*ftl
)
59 struct attribute_group
*attr_group
;
60 struct attribute
**attributes
;
61 struct sm_sysfs_attribute
*vendor_attribute
;
63 int vendor_len
= strnlen(ftl
->cis_buffer
+ SM_CIS_VENDOR_OFFSET
,
64 SM_SMALL_PAGE
- SM_CIS_VENDOR_OFFSET
);
66 char *vendor
= kmalloc(vendor_len
, GFP_KERNEL
);
67 memcpy(vendor
, ftl
->cis_buffer
+ SM_CIS_VENDOR_OFFSET
, vendor_len
);
68 vendor
[vendor_len
] = 0;
70 /* Initialize sysfs attributes */
72 kzalloc(sizeof(struct sm_sysfs_attribute
), GFP_KERNEL
);
74 sysfs_attr_init(&vendor_attribute
->dev_attr
.attr
);
76 vendor_attribute
->data
= vendor
;
77 vendor_attribute
->len
= vendor_len
;
78 vendor_attribute
->dev_attr
.attr
.name
= "vendor";
79 vendor_attribute
->dev_attr
.attr
.mode
= S_IRUGO
;
80 vendor_attribute
->dev_attr
.show
= sm_attr_show
;
83 /* Create array of pointers to the attributes */
84 attributes
= kzalloc(sizeof(struct attribute
*) * (NUM_ATTRIBUTES
+ 1),
86 attributes
[0] = &vendor_attribute
->dev_attr
.attr
;
88 /* Finally create the attribute group */
89 attr_group
= kzalloc(sizeof(struct attribute_group
), GFP_KERNEL
);
90 attr_group
->attrs
= attributes
;
94 void sm_delete_sysfs_attributes(struct sm_ftl
*ftl
)
96 struct attribute
**attributes
= ftl
->disk_attributes
->attrs
;
99 for (i
= 0; attributes
[i
] ; i
++) {
101 struct device_attribute
*dev_attr
= container_of(attributes
[i
],
102 struct device_attribute
, attr
);
104 struct sm_sysfs_attribute
*sm_attr
=
105 container_of(dev_attr
,
106 struct sm_sysfs_attribute
, dev_attr
);
108 kfree(sm_attr
->data
);
112 kfree(ftl
->disk_attributes
->attrs
);
113 kfree(ftl
->disk_attributes
);
117 /* ----------------------- oob helpers -------------------------------------- */
/* Decode one 2-byte LBA field; returns the LBA, or -2 on a bad field. */
static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}
134 * Read LBA asscociated with block
135 * returns -1, if block is erased
136 * returns -2 if error happens
138 static int sm_read_lba(struct sm_oob
*oob
)
140 static const uint32_t erased_pattern
[4] = {
141 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
146 /* First test for erased block */
147 if (!memcmp(oob
, erased_pattern
, SM_OOB_SIZE
))
150 /* Now check is both copies of the LBA differ too much */
151 lba_test
= *(uint16_t *)oob
->lba_copy1
^ *(uint16_t*)oob
->lba_copy2
;
152 if (lba_test
&& !is_power_of_2(lba_test
))
156 lba
= sm_get_lba(oob
->lba_copy1
);
159 lba
= sm_get_lba(oob
->lba_copy2
);
164 static void sm_write_lba(struct sm_oob
*oob
, uint16_t lba
)
168 WARN_ON(lba
>= 1000);
170 tmp
[0] = 0x10 | ((lba
>> 7) & 0x07);
171 tmp
[1] = (lba
<< 1) & 0xFF;
173 if (hweight16(*(uint16_t *)tmp
) & 0x01)
176 oob
->lba_copy1
[0] = oob
->lba_copy2
[0] = tmp
[0];
177 oob
->lba_copy1
[1] = oob
->lba_copy2
[1] = tmp
[1];
181 /* Make offset from parts */
182 static loff_t
sm_mkoffset(struct sm_ftl
*ftl
, int zone
, int block
, int boffset
)
184 WARN_ON(boffset
& (SM_SECTOR_SIZE
- 1));
185 WARN_ON(zone
< 0 || zone
>= ftl
->zone_count
);
186 WARN_ON(block
>= ftl
->zone_size
);
187 WARN_ON(boffset
>= ftl
->block_size
);
192 return (zone
* SM_MAX_ZONE_SIZE
+ block
) * ftl
->block_size
+ boffset
;
195 /* Breaks offset into parts */
196 static void sm_break_offset(struct sm_ftl
*ftl
, loff_t offset
,
197 int *zone
, int *block
, int *boffset
)
199 *boffset
= do_div(offset
, ftl
->block_size
);
200 *block
= do_div(offset
, ftl
->max_lba
);
201 *zone
= offset
>= ftl
->zone_count
? -1 : offset
;
204 /* ---------------------- low level IO ------------------------------------- */
206 static int sm_correct_sector(uint8_t *buffer
, struct sm_oob
*oob
)
210 __nand_calculate_ecc(buffer
, SM_SMALL_PAGE
, ecc
);
211 if (__nand_correct_data(buffer
, ecc
, oob
->ecc1
, SM_SMALL_PAGE
) < 0)
214 buffer
+= SM_SMALL_PAGE
;
216 __nand_calculate_ecc(buffer
, SM_SMALL_PAGE
, ecc
);
217 if (__nand_correct_data(buffer
, ecc
, oob
->ecc2
, SM_SMALL_PAGE
) < 0)
222 /* Reads a sector + oob*/
223 static int sm_read_sector(struct sm_ftl
*ftl
,
224 int zone
, int block
, int boffset
,
225 uint8_t *buffer
, struct sm_oob
*oob
)
227 struct mtd_info
*mtd
= ftl
->trans
->mtd
;
228 struct mtd_oob_ops ops
;
229 struct sm_oob tmp_oob
;
233 /* FTL can contain -1 entries that are by default filled with bits */
235 memset(buffer
, 0xFF, SM_SECTOR_SIZE
);
239 /* User might not need the oob, but we do for data vertification */
243 ops
.mode
= ftl
->smallpagenand
? MTD_OOB_RAW
: MTD_OOB_PLACE
;
245 ops
.ooblen
= SM_OOB_SIZE
;
246 ops
.oobbuf
= (void *)oob
;
247 ops
.len
= SM_SECTOR_SIZE
;
252 /* Avoid infinite recursion on CIS reads, sm_recheck_media
254 if (zone
== 0 && block
== ftl
->cis_block
&& boffset
==
258 /* Test if media is stable */
259 if (try == 3 || sm_recheck_media(ftl
))
263 /* Unfortunelly, oob read will _always_ succeed,
264 despite card removal..... */
265 ret
= mtd
->read_oob(mtd
, sm_mkoffset(ftl
, zone
, block
, boffset
), &ops
);
267 /* Test for unknown errors */
268 if (ret
!= 0 && ret
!= -EUCLEAN
&& ret
!= -EBADMSG
) {
269 dbg("read of block %d at zone %d, failed due to error (%d)",
274 /* Do a basic test on the oob, to guard against returned garbage */
275 if (oob
->reserved
!= 0xFFFFFFFF && !is_power_of_2(~oob
->reserved
))
278 /* This should never happen, unless there is a bug in the mtd driver */
279 WARN_ON(ops
.oobretlen
!= SM_OOB_SIZE
);
280 WARN_ON(buffer
&& ops
.retlen
!= SM_SECTOR_SIZE
);
285 /* Test if sector marked as bad */
286 if (!sm_sector_valid(oob
)) {
287 dbg("read of block %d at zone %d, failed because it is marked"
288 " as bad" , block
, zone
);
293 if (ret
== -EBADMSG
||
294 (ftl
->smallpagenand
&& sm_correct_sector(buffer
, oob
))) {
296 dbg("read of block %d at zone %d, failed due to ECC error",
304 /* Writes a sector to media */
305 static int sm_write_sector(struct sm_ftl
*ftl
,
306 int zone
, int block
, int boffset
,
307 uint8_t *buffer
, struct sm_oob
*oob
)
309 struct mtd_oob_ops ops
;
310 struct mtd_info
*mtd
= ftl
->trans
->mtd
;
313 BUG_ON(ftl
->readonly
);
315 if (zone
== 0 && (block
== ftl
->cis_block
|| block
== 0)) {
316 dbg("attempted to write the CIS!");
323 ops
.mode
= ftl
->smallpagenand
? MTD_OOB_RAW
: MTD_OOB_PLACE
;
324 ops
.len
= SM_SECTOR_SIZE
;
327 ops
.ooblen
= SM_OOB_SIZE
;
328 ops
.oobbuf
= (void *)oob
;
330 ret
= mtd
->write_oob(mtd
, sm_mkoffset(ftl
, zone
, block
, boffset
), &ops
);
332 /* Now we assume that hardware will catch write bitflip errors */
333 /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
336 dbg("write to block %d at zone %d, failed with error %d",
339 sm_recheck_media(ftl
);
343 /* This should never happen, unless there is a bug in the driver */
344 WARN_ON(ops
.oobretlen
!= SM_OOB_SIZE
);
345 WARN_ON(buffer
&& ops
.retlen
!= SM_SECTOR_SIZE
);
350 /* ------------------------ block IO ------------------------------------- */
352 /* Write a block using data and lba, and invalid sector bitmap */
353 static int sm_write_block(struct sm_ftl
*ftl
, uint8_t *buf
,
354 int zone
, int block
, int lba
,
355 unsigned long invalid_bitmap
)
361 /* Initialize the oob with requested values */
362 memset(&oob
, 0xFF, SM_OOB_SIZE
);
363 sm_write_lba(&oob
, lba
);
368 for (boffset
= 0; boffset
< ftl
->block_size
;
369 boffset
+= SM_SECTOR_SIZE
) {
371 oob
.data_status
= 0xFF;
373 if (test_bit(boffset
/ SM_SECTOR_SIZE
, &invalid_bitmap
)) {
375 sm_printk("sector %d of block at LBA %d of zone %d"
376 " coudn't be read, marking it as invalid",
377 boffset
/ SM_SECTOR_SIZE
, lba
, zone
);
382 if (ftl
->smallpagenand
) {
383 __nand_calculate_ecc(buf
+ boffset
,
384 SM_SMALL_PAGE
, oob
.ecc1
);
386 __nand_calculate_ecc(buf
+ boffset
+ SM_SMALL_PAGE
,
387 SM_SMALL_PAGE
, oob
.ecc2
);
389 if (!sm_write_sector(ftl
, zone
, block
, boffset
,
390 buf
+ boffset
, &oob
))
395 /* If write fails. try to erase the block */
396 /* This is safe, because we never write in blocks
397 that contain valuable data.
398 This is intended to repair block that are marked
399 as erased, but that isn't fully erased*/
401 if (sm_erase_block(ftl
, zone
, block
, 0))
407 sm_mark_block_bad(ftl
, zone
, block
);
415 /* Mark whole block at offset 'offs' as bad. */
416 static void sm_mark_block_bad(struct sm_ftl
*ftl
, int zone
, int block
)
421 memset(&oob
, 0xFF, SM_OOB_SIZE
);
422 oob
.block_status
= 0xF0;
427 if (sm_recheck_media(ftl
))
430 sm_printk("marking block %d of zone %d as bad", block
, zone
);
432 /* We aren't checking the return value, because we don't care */
433 /* This also fails on fake xD cards, but I guess these won't expose
434 any bad blocks till fail completly */
435 for (boffset
= 0; boffset
< ftl
->block_size
; boffset
+= SM_SECTOR_SIZE
)
436 sm_write_sector(ftl
, zone
, block
, boffset
, NULL
, &oob
);
440 * Erase a block within a zone
441 * If erase succedes, it updates free block fifo, otherwise marks block as bad
443 static int sm_erase_block(struct sm_ftl
*ftl
, int zone_num
, uint16_t block
,
446 struct ftl_zone
*zone
= &ftl
->zones
[zone_num
];
447 struct mtd_info
*mtd
= ftl
->trans
->mtd
;
448 struct erase_info erase
;
451 erase
.callback
= sm_erase_callback
;
452 erase
.addr
= sm_mkoffset(ftl
, zone_num
, block
, 0);
453 erase
.len
= ftl
->block_size
;
454 erase
.priv
= (u_long
)ftl
;
459 BUG_ON(ftl
->readonly
);
461 if (zone_num
== 0 && (block
== ftl
->cis_block
|| block
== 0)) {
462 sm_printk("attempted to erase the CIS!");
466 if (mtd
->erase(mtd
, &erase
)) {
467 sm_printk("erase of block %d in zone %d failed",
472 if (erase
.state
== MTD_ERASE_PENDING
)
473 wait_for_completion(&ftl
->erase_completion
);
475 if (erase
.state
!= MTD_ERASE_DONE
) {
476 sm_printk("erase of block %d in zone %d failed after wait",
482 kfifo_in(&zone
->free_sectors
,
483 (const unsigned char *)&block
, sizeof(block
));
487 sm_mark_block_bad(ftl
, zone_num
, block
);
491 static void sm_erase_callback(struct erase_info
*self
)
493 struct sm_ftl
*ftl
= (struct sm_ftl
*)self
->priv
;
494 complete(&ftl
->erase_completion
);
497 /* Throughtly test that block is valid. */
498 static int sm_check_block(struct sm_ftl
*ftl
, int zone
, int block
)
502 int lbas
[] = { -3, 0, 0, 0 };
507 /* First just check that block doesn't look fishy */
508 /* Only blocks that are valid or are sliced in two parts, are
510 for (boffset
= 0; boffset
< ftl
->block_size
;
511 boffset
+= SM_SECTOR_SIZE
) {
513 /* This shoudn't happen anyway */
514 if (sm_read_sector(ftl
, zone
, block
, boffset
, NULL
, &oob
))
517 test_lba
= sm_read_lba(&oob
);
519 if (lbas
[i
] != test_lba
)
520 lbas
[++i
] = test_lba
;
522 /* If we found three different LBAs, something is fishy */
527 /* If the block is sliced (partialy erased usually) erase it */
529 sm_erase_block(ftl
, zone
, block
, 1);
536 /* ----------------- media scanning --------------------------------- */
537 static const struct chs_entry chs_table
[] = {
545 { 128, 500, 16, 32 },
546 { 256, 1000, 16, 32 },
547 { 512, 1015, 32, 63 },
548 { 1024, 985, 33, 63 },
549 { 2048, 985, 33, 63 },
/* First bytes of a valid SmartMedia/xD CIS sector. */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
557 /* Find out media parameters.
558 * This ideally has to be based on nand id, but for now device size is enough */
559 int sm_get_media_info(struct sm_ftl
*ftl
, struct mtd_info
*mtd
)
562 int size_in_megs
= mtd
->size
/ (1024 * 1024);
564 ftl
->readonly
= mtd
->type
== MTD_ROM
;
566 /* Manual settings for very old devices */
568 ftl
->smallpagenand
= 0;
570 switch (size_in_megs
) {
572 /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
573 ftl
->zone_size
= 256;
575 ftl
->block_size
= 8 * SM_SECTOR_SIZE
;
576 ftl
->smallpagenand
= 1;
580 /* 2 MiB flash SmartMedia (256 byte pages)*/
581 if (mtd
->writesize
== SM_SMALL_PAGE
) {
582 ftl
->zone_size
= 512;
584 ftl
->block_size
= 8 * SM_SECTOR_SIZE
;
585 ftl
->smallpagenand
= 1;
586 /* 2 MiB rom SmartMedia */
592 ftl
->zone_size
= 256;
594 ftl
->block_size
= 16 * SM_SECTOR_SIZE
;
598 /* 4 MiB flash/rom SmartMedia device */
599 ftl
->zone_size
= 512;
601 ftl
->block_size
= 16 * SM_SECTOR_SIZE
;
604 /* 8 MiB flash/rom SmartMedia device */
605 ftl
->zone_size
= 1024;
607 ftl
->block_size
= 16 * SM_SECTOR_SIZE
;
610 /* Minimum xD size is 16MiB. Also, all xD cards have standard zone
611 sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
612 if (size_in_megs
>= 16) {
613 ftl
->zone_count
= size_in_megs
/ 16;
614 ftl
->zone_size
= 1024;
616 ftl
->block_size
= 32 * SM_SECTOR_SIZE
;
619 /* Test for proper write,erase and oob sizes */
620 if (mtd
->erasesize
> ftl
->block_size
)
623 if (mtd
->writesize
> SM_SECTOR_SIZE
)
626 if (ftl
->smallpagenand
&& mtd
->oobsize
< SM_SMALL_OOB_SIZE
)
629 if (!ftl
->smallpagenand
&& mtd
->oobsize
< SM_OOB_SIZE
)
632 /* We use these functions for IO */
633 if (!mtd
->read_oob
|| !mtd
->write_oob
)
636 /* Find geometry information */
637 for (i
= 0 ; i
< ARRAY_SIZE(chs_table
) ; i
++) {
638 if (chs_table
[i
].size
== size_in_megs
) {
639 ftl
->cylinders
= chs_table
[i
].cyl
;
640 ftl
->heads
= chs_table
[i
].head
;
641 ftl
->sectors
= chs_table
[i
].sec
;
646 sm_printk("media has unknown size : %dMiB", size_in_megs
);
647 ftl
->cylinders
= 985;
653 /* Validate the CIS */
654 static int sm_read_cis(struct sm_ftl
*ftl
)
658 if (sm_read_sector(ftl
,
659 0, ftl
->cis_block
, ftl
->cis_boffset
, ftl
->cis_buffer
, &oob
))
662 if (!sm_sector_valid(&oob
) || !sm_block_valid(&oob
))
665 if (!memcmp(ftl
->cis_buffer
+ ftl
->cis_page_offset
,
666 cis_signature
, sizeof(cis_signature
))) {
673 /* Scan the media for the CIS */
674 static int sm_find_cis(struct sm_ftl
*ftl
)
681 /* Search for first valid block */
682 for (block
= 0 ; block
< ftl
->zone_size
- ftl
->max_lba
; block
++) {
684 if (sm_read_sector(ftl
, 0, block
, 0, NULL
, &oob
))
687 if (!sm_block_valid(&oob
))
696 /* Search for first valid sector in this block */
697 for (boffset
= 0 ; boffset
< ftl
->block_size
;
698 boffset
+= SM_SECTOR_SIZE
) {
700 if (sm_read_sector(ftl
, 0, block
, boffset
, NULL
, &oob
))
703 if (!sm_sector_valid(&oob
))
708 if (boffset
== ftl
->block_size
)
711 ftl
->cis_block
= block
;
712 ftl
->cis_boffset
= boffset
;
713 ftl
->cis_page_offset
= 0;
715 cis_found
= !sm_read_cis(ftl
);
718 ftl
->cis_page_offset
= SM_SMALL_PAGE
;
719 cis_found
= !sm_read_cis(ftl
);
723 dbg("CIS block found at offset %x",
724 block
* ftl
->block_size
+
725 boffset
+ ftl
->cis_page_offset
);
731 /* Basic test to determine if underlying mtd device if functional */
732 static int sm_recheck_media(struct sm_ftl
*ftl
)
734 if (sm_read_cis(ftl
)) {
736 if (!ftl
->unstable
) {
737 sm_printk("media unstable, not allowing writes");
745 /* Initialize a FTL zone */
746 static int sm_init_zone(struct sm_ftl
*ftl
, int zone_num
)
748 struct ftl_zone
*zone
= &ftl
->zones
[zone_num
];
755 dbg("initializing zone %d", zone_num
);
757 /* Allocate memory for FTL table */
758 zone
->lba_to_phys_table
= kmalloc(ftl
->max_lba
* 2, GFP_KERNEL
);
760 if (!zone
->lba_to_phys_table
)
762 memset(zone
->lba_to_phys_table
, -1, ftl
->max_lba
* 2);
765 /* Allocate memory for free sectors FIFO */
766 if (kfifo_alloc(&zone
->free_sectors
, ftl
->zone_size
* 2, GFP_KERNEL
)) {
767 kfree(zone
->lba_to_phys_table
);
771 /* Now scan the zone */
772 for (block
= 0 ; block
< ftl
->zone_size
; block
++) {
774 /* Skip blocks till the CIS (including) */
775 if (zone_num
== 0 && block
<= ftl
->cis_block
)
778 /* Read the oob of first sector */
779 if (sm_read_sector(ftl
, zone_num
, block
, 0, NULL
, &oob
))
782 /* Test to see if block is erased. It is enough to test
783 first sector, because erase happens in one shot */
784 if (sm_block_erased(&oob
)) {
785 kfifo_in(&zone
->free_sectors
,
786 (unsigned char *)&block
, 2);
790 /* If block is marked as bad, skip it */
791 /* This assumes we can trust first sector*/
792 /* However the way the block valid status is defined, ensures
793 very low probability of failure here */
794 if (!sm_block_valid(&oob
)) {
795 dbg("PH %04d <-> <marked bad>", block
);
800 lba
= sm_read_lba(&oob
);
802 /* Invalid LBA means that block is damaged. */
803 /* We can try to erase it, or mark it as bad, but
804 lets leave that to recovery application */
805 if (lba
== -2 || lba
>= ftl
->max_lba
) {
806 dbg("PH %04d <-> LBA %04d(bad)", block
, lba
);
811 /* If there is no collision,
812 just put the sector in the FTL table */
813 if (zone
->lba_to_phys_table
[lba
] < 0) {
814 dbg_verbose("PH %04d <-> LBA %04d", block
, lba
);
815 zone
->lba_to_phys_table
[lba
] = block
;
819 sm_printk("collision"
820 " of LBA %d between blocks %d and %d in zone %d",
821 lba
, zone
->lba_to_phys_table
[lba
], block
, zone_num
);
823 /* Test that this block is valid*/
824 if (sm_check_block(ftl
, zone_num
, block
))
827 /* Test now the old block */
828 if (sm_check_block(ftl
, zone_num
,
829 zone
->lba_to_phys_table
[lba
])) {
830 zone
->lba_to_phys_table
[lba
] = block
;
834 /* If both blocks are valid and share same LBA, it means that
835 they hold different versions of same data. It not
836 known which is more recent, thus just erase one of them
838 sm_printk("both blocks are valid, erasing the later");
839 sm_erase_block(ftl
, zone_num
, block
, 1);
842 dbg("zone initialized");
843 zone
->initialized
= 1;
845 /* No free sectors, means that the zone is heavily damaged, write won't
846 work, but it can still can be (partially) read */
847 if (!kfifo_len(&zone
->free_sectors
)) {
848 sm_printk("no free blocks in zone %d", zone_num
);
852 /* Randomize first block we write to */
853 get_random_bytes(&i
, 2);
854 i
%= (kfifo_len(&zone
->free_sectors
) / 2);
857 len
= kfifo_out(&zone
->free_sectors
,
858 (unsigned char *)&block
, 2);
860 kfifo_in(&zone
->free_sectors
, (const unsigned char *)&block
, 2);
865 /* Get and automaticly initialize an FTL mapping for one zone */
866 struct ftl_zone
*sm_get_zone(struct sm_ftl
*ftl
, int zone_num
)
868 struct ftl_zone
*zone
;
871 BUG_ON(zone_num
>= ftl
->zone_count
);
872 zone
= &ftl
->zones
[zone_num
];
874 if (!zone
->initialized
) {
875 error
= sm_init_zone(ftl
, zone_num
);
878 return ERR_PTR(error
);
884 /* ----------------- cache handling ------------------------------------------*/
886 /* Initialize the one block cache */
887 void sm_cache_init(struct sm_ftl
*ftl
)
889 ftl
->cache_data_invalid_bitmap
= 0xFFFFFFFF;
890 ftl
->cache_clean
= 1;
891 ftl
->cache_zone
= -1;
892 ftl
->cache_block
= -1;
893 /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
896 /* Put sector in one block cache */
897 void sm_cache_put(struct sm_ftl
*ftl
, char *buffer
, int boffset
)
899 memcpy(ftl
->cache_data
+ boffset
, buffer
, SM_SECTOR_SIZE
);
900 clear_bit(boffset
/ SM_SECTOR_SIZE
, &ftl
->cache_data_invalid_bitmap
);
901 ftl
->cache_clean
= 0;
904 /* Read a sector from the cache */
905 int sm_cache_get(struct sm_ftl
*ftl
, char *buffer
, int boffset
)
907 if (test_bit(boffset
/ SM_SECTOR_SIZE
,
908 &ftl
->cache_data_invalid_bitmap
))
911 memcpy(buffer
, ftl
->cache_data
+ boffset
, SM_SECTOR_SIZE
);
915 /* Write the cache to hardware */
916 int sm_cache_flush(struct sm_ftl
*ftl
)
918 struct ftl_zone
*zone
;
921 uint16_t write_sector
;
922 int zone_num
= ftl
->cache_zone
;
925 if (ftl
->cache_clean
)
931 BUG_ON(zone_num
< 0);
932 zone
= &ftl
->zones
[zone_num
];
933 block_num
= zone
->lba_to_phys_table
[ftl
->cache_block
];
936 /* Try to read all unread areas of the cache block*/
937 for_each_set_bit(sector_num
, &ftl
->cache_data_invalid_bitmap
,
938 ftl
->block_size
/ SM_SECTOR_SIZE
) {
940 if (!sm_read_sector(ftl
,
941 zone_num
, block_num
, sector_num
* SM_SECTOR_SIZE
,
942 ftl
->cache_data
+ sector_num
* SM_SECTOR_SIZE
, NULL
))
943 clear_bit(sector_num
,
944 &ftl
->cache_data_invalid_bitmap
);
951 /* If there are no spare blocks, */
952 /* we could still continue by erasing/writing the current block,
953 but for such worn out media it doesn't worth the trouble,
955 if (kfifo_out(&zone
->free_sectors
,
956 (unsigned char *)&write_sector
, 2) != 2) {
957 dbg("no free sectors for write!");
962 if (sm_write_block(ftl
, ftl
->cache_data
, zone_num
, write_sector
,
963 ftl
->cache_block
, ftl
->cache_data_invalid_bitmap
))
966 /* Update the FTL table */
967 zone
->lba_to_phys_table
[ftl
->cache_block
] = write_sector
;
969 /* Write succesfull, so erase and free the old block */
971 sm_erase_block(ftl
, zone_num
, block_num
, 1);
978 /* flush timer, runs a second after last write */
979 static void sm_cache_flush_timer(unsigned long data
)
981 struct sm_ftl
*ftl
= (struct sm_ftl
*)data
;
982 queue_work(cache_flush_workqueue
, &ftl
->flush_work
);
985 /* cache flush work, kicked by timer */
986 static void sm_cache_flush_work(struct work_struct
*work
)
988 struct sm_ftl
*ftl
= container_of(work
, struct sm_ftl
, flush_work
);
989 mutex_lock(&ftl
->mutex
);
991 mutex_unlock(&ftl
->mutex
);
995 /* ---------------- outside interface -------------------------------------- */
997 /* outside interface: read a sector */
998 static int sm_read(struct mtd_blktrans_dev
*dev
,
999 unsigned long sect_no
, char *buf
)
1001 struct sm_ftl
*ftl
= dev
->priv
;
1002 struct ftl_zone
*zone
;
1003 int error
= 0, in_cache
= 0;
1004 int zone_num
, block
, boffset
;
1006 sm_break_offset(ftl
, sect_no
<< 9, &zone_num
, &block
, &boffset
);
1007 mutex_lock(&ftl
->mutex
);
1010 zone
= sm_get_zone(ftl
, zone_num
);
1012 error
= PTR_ERR(zone
);
1016 /* Have to look at cache first */
1017 if (ftl
->cache_zone
== zone_num
&& ftl
->cache_block
== block
) {
1019 if (!sm_cache_get(ftl
, buf
, boffset
))
1023 /* Translate the block and return if doesn't exist in the table */
1024 block
= zone
->lba_to_phys_table
[block
];
1027 memset(buf
, 0xFF, SM_SECTOR_SIZE
);
1031 if (sm_read_sector(ftl
, zone_num
, block
, boffset
, buf
, NULL
)) {
1037 sm_cache_put(ftl
, buf
, boffset
);
1039 mutex_unlock(&ftl
->mutex
);
1043 /* outside interface: write a sector */
1044 static int sm_write(struct mtd_blktrans_dev
*dev
,
1045 unsigned long sec_no
, char *buf
)
1047 struct sm_ftl
*ftl
= dev
->priv
;
1048 struct ftl_zone
*zone
;
1049 int error
, zone_num
, block
, boffset
;
1051 BUG_ON(ftl
->readonly
);
1052 sm_break_offset(ftl
, sec_no
<< 9, &zone_num
, &block
, &boffset
);
1054 /* No need in flush thread running now */
1055 del_timer(&ftl
->timer
);
1056 mutex_lock(&ftl
->mutex
);
1058 zone
= sm_get_zone(ftl
, zone_num
);
1060 error
= PTR_ERR(zone
);
1064 /* If entry is not in cache, flush it */
1065 if (ftl
->cache_block
!= block
|| ftl
->cache_zone
!= zone_num
) {
1067 error
= sm_cache_flush(ftl
);
1071 ftl
->cache_block
= block
;
1072 ftl
->cache_zone
= zone_num
;
1075 sm_cache_put(ftl
, buf
, boffset
);
1077 mod_timer(&ftl
->timer
, jiffies
+ msecs_to_jiffies(cache_timeout
));
1078 mutex_unlock(&ftl
->mutex
);
1082 /* outside interface: flush everything */
1083 static int sm_flush(struct mtd_blktrans_dev
*dev
)
1085 struct sm_ftl
*ftl
= dev
->priv
;
1088 mutex_lock(&ftl
->mutex
);
1089 retval
= sm_cache_flush(ftl
);
1090 mutex_unlock(&ftl
->mutex
);
1094 /* outside interface: device is released */
1095 static int sm_release(struct mtd_blktrans_dev
*dev
)
1097 struct sm_ftl
*ftl
= dev
->priv
;
1099 mutex_lock(&ftl
->mutex
);
1100 del_timer_sync(&ftl
->timer
);
1101 cancel_work_sync(&ftl
->flush_work
);
1102 sm_cache_flush(ftl
);
1103 mutex_unlock(&ftl
->mutex
);
1107 /* outside interface: get geometry */
1108 static int sm_getgeo(struct mtd_blktrans_dev
*dev
, struct hd_geometry
*geo
)
1110 struct sm_ftl
*ftl
= dev
->priv
;
1111 geo
->heads
= ftl
->heads
;
1112 geo
->sectors
= ftl
->sectors
;
1113 geo
->cylinders
= ftl
->cylinders
;
1117 /* external interface: main initialization function */
1118 static void sm_add_mtd(struct mtd_blktrans_ops
*tr
, struct mtd_info
*mtd
)
1120 struct mtd_blktrans_dev
*trans
;
1123 /* Allocate & initialize our private structure */
1124 ftl
= kzalloc(sizeof(struct sm_ftl
), GFP_KERNEL
);
1129 mutex_init(&ftl
->mutex
);
1130 setup_timer(&ftl
->timer
, sm_cache_flush_timer
, (unsigned long)ftl
);
1131 INIT_WORK(&ftl
->flush_work
, sm_cache_flush_work
);
1132 init_completion(&ftl
->erase_completion
);
1134 /* Read media information */
1135 if (sm_get_media_info(ftl
, mtd
)) {
1136 dbg("found unsupported mtd device, aborting");
1141 /* Allocate temporary CIS buffer for read retry support */
1142 ftl
->cis_buffer
= kzalloc(SM_SECTOR_SIZE
, GFP_KERNEL
);
1143 if (!ftl
->cis_buffer
)
1146 /* Allocate zone array, it will be initialized on demand */
1147 ftl
->zones
= kzalloc(sizeof(struct ftl_zone
) * ftl
->zone_count
,
1152 /* Allocate the cache*/
1153 ftl
->cache_data
= kzalloc(ftl
->block_size
, GFP_KERNEL
);
1155 if (!ftl
->cache_data
)
1161 /* Allocate upper layer structure and initialize it */
1162 trans
= kzalloc(sizeof(struct mtd_blktrans_dev
), GFP_KERNEL
);
1172 trans
->size
= (ftl
->block_size
* ftl
->max_lba
* ftl
->zone_count
) >> 9;
1173 trans
->readonly
= ftl
->readonly
;
1175 if (sm_find_cis(ftl
)) {
1176 dbg("CIS not found on mtd device, aborting");
1180 ftl
->disk_attributes
= sm_create_sysfs_attributes(ftl
);
1181 trans
->disk_attributes
= ftl
->disk_attributes
;
1183 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1184 (int)(mtd
->size
/ (1024 * 1024)), mtd
->index
);
1187 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1188 ftl
->zone_count
, ftl
->max_lba
,
1189 ftl
->zone_size
- ftl
->max_lba
);
1190 dbg("each block consists of %d bytes",
1194 /* Register device*/
1195 if (add_mtd_blktrans_dev(trans
)) {
1196 dbg("error in mtdblktrans layer");
1203 kfree(ftl
->cache_data
);
1207 kfree(ftl
->cis_buffer
);
1214 /* main interface: device {surprise,} removal */
1215 static void sm_remove_dev(struct mtd_blktrans_dev
*dev
)
1217 struct sm_ftl
*ftl
= dev
->priv
;
1220 del_mtd_blktrans_dev(dev
);
1223 for (i
= 0 ; i
< ftl
->zone_count
; i
++) {
1225 if (!ftl
->zones
[i
].initialized
)
1228 kfree(ftl
->zones
[i
].lba_to_phys_table
);
1229 kfifo_free(&ftl
->zones
[i
].free_sectors
);
1232 sm_delete_sysfs_attributes(ftl
);
1233 kfree(ftl
->cis_buffer
);
1235 kfree(ftl
->cache_data
);
1239 static struct mtd_blktrans_ops sm_ftl_ops
= {
1242 .part_bits
= SM_FTL_PARTN_BITS
,
1243 .blksize
= SM_SECTOR_SIZE
,
1244 .getgeo
= sm_getgeo
,
1246 .add_mtd
= sm_add_mtd
,
1247 .remove_dev
= sm_remove_dev
,
1249 .readsect
= sm_read
,
1250 .writesect
= sm_write
,
1253 .release
= sm_release
,
1255 .owner
= THIS_MODULE
,
1258 static __init
int sm_module_init(void)
1261 cache_flush_workqueue
= create_freezeable_workqueue("smflush");
1263 if (IS_ERR(cache_flush_workqueue
))
1264 return PTR_ERR(cache_flush_workqueue
);
1266 error
= register_mtd_blktrans(&sm_ftl_ops
);
1268 destroy_workqueue(cache_flush_workqueue
);
1273 static void __exit
sm_module_exit(void)
1275 destroy_workqueue(cache_flush_workqueue
);
1276 deregister_mtd_blktrans(&sm_ftl_ops
);
1279 module_init(sm_module_init
);
1280 module_exit(sm_module_exit
);
1282 MODULE_LICENSE("GPL");
1283 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1284 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");