// SPDX-License-Identifier: GPL-2.0-only
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005 Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 * http://www.gensw.com/pages/prod/bios/rfd.htm
 */
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#define RFD_FTL_MAJOR		256

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63
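/*
 * Illustrative sketch, not part of the original driver: taken together, the
 * constants above describe a little-endian header at the start of each erase
 * unit, roughly
 *
 *	u16 magic;	RFD_MAGIC (0x9193)
 *	u16 unknown;	0xffff or 0xffc8, purpose unknown
 *	u16 reserved;	always 0xffff, ignored
 *	u16 map[];	one entry per data sector: SECTOR_FREE, SECTOR_DELETED,
 *			SECTOR_ZERO, or the logical sector number stored there
 *
 * The data sectors follow, starting header_sectors_per_block * SECTOR_SIZE
 * bytes into the erase unit.
 */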
struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector,
		char *buf);
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}
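/*
 * Worked example (illustrative, not from the original source), assuming a
 * 64 KiB erase unit with one header sector: map entry i of erase unit
 * block_no resolves to the flash address
 *
 *	block->offset + (i + header_sectors_per_block) * SECTOR_SIZE
 *
 * so with block_size = 0x10000, header_sectors_per_block = 1 and i = 5,
 * erase unit #2 stores that logical sector at 0x20000 + 6 * 512 = 0x20c00.
 */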
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
					      part->sector_count));
	if (!part->sector_map)
		goto err;

	for (i = 0; i < part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i = 0, blocks_found = 0; i < part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}
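/*
 * Worked example (illustrative, not from the original source): for a 16 MiB
 * NOR part with 64 KiB erase units, scan_header() computes
 *
 *	sectors_per_block	 = 0x10000 / 512 = 128
 *	header_sectors_per_block = ((3 + 128) * 2 + 511) / 512 = 1
 *	data_sectors_per_block	 = 128 - 1 = 127
 *	total_blocks		 = 256
 *	cylinders		 = (127 * 255 - 1) / 63 = 514
 *	sector_count		 = 514 * 63 = 32382 sectors (~15.8 MiB)
 *
 * One erase unit's worth of capacity is always held back for reclaiming.
 */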
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
			"failed\n", (unsigned long long)erase->addr,
			(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name,
			       part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else
			part->blocks[block].state = BLOCK_OK;
	}

	kfree(erase);

	return rc;
}
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);
		goto err;
	}

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}

		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);
			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev *)part,
				entry, sector_data);
		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block = 0; block < part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming if there is a free sector as
		 * more removed sectors is more efficient (have to move
		 * less).
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}
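/*
 * Note (added commentary, not from the original source): the score minimised
 * above is simply used_sectors + erases over all candidate erase units, so
 * reclaiming prefers units that are cheap to copy out (few live sectors) and
 * that have been erased least often, which gives a rough form of wear
 * levelling.
 */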
/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}
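/*
 * Note (added commentary, not from the original source): when there is no
 * current block, the search above starts at jiffies % total_blocks rather
 * than at unit 0, which spreads fresh writes across the device instead of
 * always filling the lowest-numbered free erase unit first.
 */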
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		return rc;
	}

	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

	return rc;
}
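/*
 * Worked example (illustrative, not from the original source): deleting the
 * sector stored at flash address old_addr only rewrites its 16-bit map entry.
 * With a 64 KiB erase unit and one header sector, old_addr = 0x20c00 gives
 * block = 2, offset = (0xc00 / 512) - 1 = 5, and the SECTOR_DELETED word is
 * written at 0x20000 + (3 + 5) * 2 = 0x20010; the data sector itself is left
 * untouched until the whole erase unit is reclaimed.
 */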
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;

	} while (i != stop);

	return -1;
}
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);
	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);
	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}
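/*
 * Note (added commentary, not from the original source): do_writesect()
 * writes the 512-byte data sector first and only then the 16-bit map entry
 * that points at it; if the second write never happens, the entry stays at
 * SECTOR_FREE and the sector is not entered into sector_map on the next scan.
 */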
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i = 0; i < SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}
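/*
 * Note (added commentary, not from the original source): an all-zero sector
 * is never written to flash at all; its sector_map entry is set to -1 and
 * rfd_ftl_readsect() reconstructs it with memset(), while any previous copy
 * is marked deleted. SECTOR_ZERO exists so that logical sector 0 can still be
 * distinguished from SECTOR_DELETED (0x0000) in the on-flash map.
 */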
static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
			       unsigned long sector, unsigned int nr_sects)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long addr;
	int rc;

	while (nr_sects) {
		if (sector >= part->sector_count)
			return -EIO;

		addr = part->sector_map[sector];

		if (addr != -1) {
			rc = mark_sector_deleted(part, addr);
			if (rc)
				return rc;

			part->sector_map[sector] = -1;
		}

		sector++;
		nr_sects--;
	}

	return 0;
}
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = container_of(dev, struct partition, mbd);

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
	    mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		}
		part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev(&part->mbd))
			return;
	}
out:
	kfree(part);
}
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	int i;

	for (i = 0; i < part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			 part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
	del_mtd_blktrans_dev(&part->mbd);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.discard	= rfd_ftl_discardsect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};
module_mtd_blktrans(rfd_ftl_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");