/* This version ported to the Linux-MTD system by dwmw2@infradead.org
 * $Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $
 *
 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
 *
 * Based on:
 */
/*======================================================================

    A Flash Translation Layer memory card driver

    This driver implements a disk-like block device driver with an
    apparent block size of 512 bytes for flash memory cards.

    ftl_cs.c 1.62 2000/02/01 00:59:04

    The contents of this file are subject to the Mozilla Public
    License Version 1.1 (the "License"); you may not use this file
    except in compliance with the License. You may obtain a copy of
    the License at http://www.mozilla.org/MPL/

    Software distributed under the License is distributed on an "AS
    IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
    implied. See the License for the specific language governing
    rights and limitations under the License.

    The initial developer of the original code is David A. Hinds
    <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
    are Copyright (C) 1999 David A. Hinds. All Rights Reserved.

    Alternatively, the contents of this file may be used under the
    terms of the GNU General Public License version 2 (the "GPL"), in
    which case the provisions of the GPL are applicable instead of the
    above. If you wish to allow the use of your version of this file
    only under the terms of the GPL and not to allow others to use
    your version of this file under the MPL, indicate your decision
    by deleting the provisions above and replace them with the notice
    and other provisions required by the GPL. If you do not delete
    the provisions above, a recipient may use your version of this
    file under either the MPL or the GPL.

    LEGAL NOTE: The FTL format is patented by M-Systems. They have
    granted a license for its use with PCMCIA devices:

     "M-Systems grants a royalty-free, non-exclusive license under
      any presently existing M-Systems intellectual property rights
      necessary for the design and development of FTL-compatible
      drivers, file systems and utilities using the data formats with
      PCMCIA PC Cards as described in the PCMCIA Flash Translation
      Layer (FTL) Specification."

    Use of the FTL format for non-PCMCIA applications may be an
    infringement of these patents. For additional information,
    contact M-Systems (http://www.m-sys.com) directly.

======================================================================*/
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
/*#define PSYCHO_DEBUG */

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/vmalloc.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>

#include <linux/mtd/ftl.h>

/*====================================================================*/

/* Parameters that can be set with 'insmod' */
static int shuffle_freq = 50;
module_param(shuffle_freq, int, 0);
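
/*
 * Usage example (added for clarity; the device is hypothetical): loading
 * the module with e.g. "modprobe ftl shuffle_freq=100" raises the modulus
 * used by reclaim_block(), so the wear-leveling shuffle described there
 * triggers roughly half as often as with the default of 50.
 */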

/*====================================================================*/

/* Major device # for FTL device */
#ifndef FTL_MAJOR
#define FTL_MAJOR	44
#endif

/*====================================================================*/

/* Maximum number of separate memory devices we'll allow */
#define MAX_DEV		4

/* Maximum number of regions per device */
#define MAX_REGION	4

/* Maximum number of partitions in an FTL region */
#define PART_BITS	4

/* Maximum number of outstanding erase requests per socket */
#define MAX_ERASE	8

/* Sector size -- shouldn't need to change */
#define SECTOR_SIZE	512

/* Each memory region corresponds to a minor device */
typedef struct partition_t {
    struct mtd_blktrans_dev mbd;
    u_int32_t		state;
    u_int32_t		*VirtualBlockMap;
    u_int32_t		*VirtualPageMap;
    u_int32_t		FreeTotal;
    struct eun_info_t {
	u_int32_t	Offset;
	u_int32_t	EraseCount;
	u_int32_t	Free;
	u_int32_t	Deleted;
    } *EUNInfo;
    struct xfer_info_t {
	u_int32_t	Offset;
	u_int32_t	EraseCount;
	u_int16_t	state;
    } *XferInfo;
    u_int16_t		bam_index;
    u_int32_t		*bam_cache;
    u_int16_t		DataUnits;
    u_int32_t		BlocksPerUnit;
    erase_unit_header_t	header;
#if 0
    region_info_t	region;
    memory_handle_t	handle;
#endif
} partition_t;
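
/*
 * Bookkeeping summary (descriptive comment added for clarity; see the
 * code below for the authoritative behaviour):
 *  - VirtualBlockMap[] maps each 512-byte logical block to its current
 *    flash location, encoded as (unit << EraseUnitSize) + byte offset
 *    within the unit, or 0xffffffff if the block was never written.
 *  - EUNInfo[] tracks every data erase unit: flash Offset, EraseCount,
 *    and running counts of Free and Deleted blocks.
 *  - XferInfo[] tracks the spare "transfer" units used during reclaim.
 *  - bam_cache[] holds the Block Allocation Map of unit bam_index;
 *    bam_index == 0xffff means the cache is invalid.
 */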

void ftl_freepart(partition_t *part);

/* Partition state flags */
#define FTL_FORMATTED	0x01

/* Transfer unit states */
#define XFER_UNKNOWN	0x00
#define XFER_ERASING	0x01
#define XFER_ERASED	0x02
#define XFER_PREPARED	0x03
#define XFER_FAILED	0x04

/*====================================================================*/

static void ftl_erase_callback(struct erase_info *done);

/*======================================================================

    Scan_header() checks to see if a memory region contains an FTL
    partition. build_maps() reads all the erase unit headers, builds
    the erase unit map, and then builds the virtual page map.

======================================================================*/
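
/*
 * Illustrative example (values are hypothetical): with BlockSize == 9
 * (512-byte blocks) and EraseUnitSize == 17 (128 KiB erase units), block
 * j == 5 of data unit i == 2 is recorded in the virtual block map as
 * (2 << 17) + (5 << 9). ftl_read() later recovers the unit as
 * log_addr / (1 << EraseUnitSize) and the byte offset within it as
 * log_addr % (1 << EraseUnitSize).
 */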

static int scan_header(partition_t *part)
{
    erase_unit_header_t header;
    loff_t offset, max_offset;
    size_t ret;
    int err;
    part->header.FormattedSize = 0;
    max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
    /* Search first megabyte for a valid FTL header */
    for (offset = 0;
	 (offset + sizeof(header)) < max_offset;
	 offset += part->mbd.mtd->erasesize ? : 0x2000) {

	err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
				  (unsigned char *)&header);

	if (err)
	    return err;

	if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
    }

    if (offset == max_offset) {
	printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
	return -ENOENT;
    }
    if (header.BlockSize != 9 ||
	(header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
	(header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
	printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
	return -1;
    }
    if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
	printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
	       1 << header.EraseUnitSize, part->mbd.mtd->erasesize);
	return -1;
    }
    part->header = header;
    return 0;
}

static int build_maps(partition_t *part)
{
    erase_unit_header_t header;
    u_int16_t xvalid, xtrans, i;
    u_int blocks, j;
    int hdr_ok, ret = -1;
    ssize_t retval;
    loff_t offset;

    /* Set up erase unit maps */
    part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
	part->header.NumTransferUnits;
    part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
			    GFP_KERNEL);
    if (!part->EUNInfo)
	goto out;
    for (i = 0; i < part->DataUnits; i++)
	part->EUNInfo[i].Offset = 0xffffffff;
    part->XferInfo =
	kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
		GFP_KERNEL);
    if (!part->XferInfo)
	goto out_EUNInfo;

    xvalid = xtrans = 0;
    for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
	offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
		  << part->header.EraseUnitSize);
	ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
				  (unsigned char *)&header);

	if (ret)
	    goto out_XferInfo;

	ret = -1;
	/* Is this a transfer partition? */
	hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
	if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
	    (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
	    part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
	    part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
		le32_to_cpu(header.EraseCount);
	    xvalid++;
	} else {
	    if (xtrans == part->header.NumTransferUnits) {
		printk(KERN_NOTICE "ftl_cs: format error: too many "
		       "transfer units!\n");
		goto out_XferInfo;
	    }
	    if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
		part->XferInfo[xtrans].state = XFER_PREPARED;
		part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
	    } else {
		part->XferInfo[xtrans].state = XFER_UNKNOWN;
		/* Pick anything reasonable for the erase count */
		part->XferInfo[xtrans].EraseCount =
		    le32_to_cpu(part->header.EraseCount);
	    }
	    part->XferInfo[xtrans].Offset = offset;
	    xtrans++;
	}
    }
    /* Check for format trouble */
    header = part->header;
    if ((xtrans != header.NumTransferUnits) ||
	(xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
	printk(KERN_NOTICE "ftl_cs: format error: erase units "
	       "don't add up!\n");
	goto out_XferInfo;
    }

    /* Set up virtual page map */
    blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
    part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
    if (!part->VirtualBlockMap)
	goto out_XferInfo;

    memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t));
    part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;

    part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t),
			      GFP_KERNEL);
    if (!part->bam_cache)
	goto out_VirtualBlockMap;

    part->bam_index = 0xffff;
    part->FreeTotal = 0;

    for (i = 0; i < part->DataUnits; i++) {
	part->EUNInfo[i].Free = 0;
	part->EUNInfo[i].Deleted = 0;
	offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);

	ret = part->mbd.mtd->read(part->mbd.mtd, offset,
				  part->BlocksPerUnit * sizeof(u_int32_t), &retval,
				  (unsigned char *)part->bam_cache);

	if (ret)
	    goto out_bam_cache;

	for (j = 0; j < part->BlocksPerUnit; j++) {
	    if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
		part->EUNInfo[i].Free++;
		part->FreeTotal++;
	    } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
		       (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
		part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
		    (i << header.EraseUnitSize) + (j << header.BlockSize);
	    else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
		part->EUNInfo[i].Deleted++;
	}
    }

    ret = 0;
    goto out;

out_bam_cache:
    kfree(part->bam_cache);
out_VirtualBlockMap:
    vfree(part->VirtualBlockMap);
out_XferInfo:
    kfree(part->XferInfo);
out_EUNInfo:
    kfree(part->EUNInfo);
out:
    return ret;
} /* build_maps */

/*======================================================================

    Erase_xfer() schedules an asynchronous erase operation for a
    transfer unit.

======================================================================*/

static int erase_xfer(partition_t *part,
		      u_int16_t xfernum)
{
    int ret;
    struct xfer_info_t *xfer;
    struct erase_info *erase;

    xfer = &part->XferInfo[xfernum];
    DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
    xfer->state = XFER_ERASING;

    /* Is there a free erase slot? Always in MTD. */

    erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
    if (!erase)
	return -ENOMEM;

    erase->mtd = part->mbd.mtd;
    erase->callback = ftl_erase_callback;
    erase->addr = xfer->Offset;
    erase->len = 1 << part->header.EraseUnitSize;
    erase->priv = (u_long)part;

    ret = part->mbd.mtd->erase(part->mbd.mtd, erase);

    if (!ret)
	xfer->EraseCount++;
    else
	kfree(erase);

    return ret;
} /* erase_xfer */

/*======================================================================

    Prepare_xfer() takes a freshly erased transfer unit and gives
    it an appropriate header.

======================================================================*/

static void ftl_erase_callback(struct erase_info *erase)
{
    partition_t *part;
    struct xfer_info_t *xfer;
    int i;

    /* Look up the transfer unit */
    part = (partition_t *)(erase->priv);

    for (i = 0; i < part->header.NumTransferUnits; i++)
	if (part->XferInfo[i].Offset == erase->addr) break;

    if (i == part->header.NumTransferUnits) {
	printk(KERN_NOTICE "ftl_cs: internal error: "
	       "erase lookup failed!\n");
	return;
    }

    xfer = &part->XferInfo[i];
    if (erase->state == MTD_ERASE_DONE)
	xfer->state = XFER_ERASED;
    else {
	xfer->state = XFER_FAILED;
	printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
	       erase->state);
    }

    kfree(erase);

} /* ftl_erase_callback */

static int prepare_xfer(partition_t *part, int i)
{
    erase_unit_header_t header;
    struct xfer_info_t *xfer;
    int nbam, ret;
    u_int32_t ctl;
    ssize_t retlen;
    loff_t offset;

    xfer = &part->XferInfo[i];
    xfer->state = XFER_FAILED;

    DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);

    /* Write the transfer unit header */
    header = part->header;
    header.LogicalEUN = cpu_to_le16(0xffff);
    header.EraseCount = cpu_to_le32(xfer->EraseCount);

    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
			       &retlen, (u_char *)&header);

    if (ret) {
	return ret;
    }

    /* Write the BAM stub */
    nbam = (part->BlocksPerUnit * sizeof(u_int32_t) +
	    le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;

    offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
    ctl = cpu_to_le32(BLOCK_CONTROL);

    for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {

	ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
				   &retlen, (u_char *)&ctl);

	if (ret)
	    return ret;
    }
    xfer->state = XFER_PREPARED;
    return 0;

} /* prepare_xfer */

/*======================================================================

    Copy_erase_unit() takes a full erase block and a transfer unit,
    copies everything to the transfer unit, then swaps the block
    pointers.

    All data blocks are copied to the corresponding blocks in the
    target unit, so the virtual block map does not need to be
    updated.

======================================================================*/

static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
			   u_int16_t xferunit)
{
    u_char buf[SECTOR_SIZE];
    struct eun_info_t *eun;
    struct xfer_info_t *xfer;
    u_int32_t src, dest, free, i;
    u_int16_t unit;
    int ret;
    ssize_t retlen;
    loff_t offset;
    u_int16_t srcunitswap = cpu_to_le16(srcunit);

    eun = &part->EUNInfo[srcunit];
    xfer = &part->XferInfo[xferunit];
    DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
	  eun->Offset, xfer->Offset);

    /* Read current BAM */
    if (part->bam_index != srcunit) {

	offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);

	ret = part->mbd.mtd->read(part->mbd.mtd, offset,
				  part->BlocksPerUnit * sizeof(u_int32_t),
				  &retlen, (u_char *) (part->bam_cache));

	/* mark the cache bad, in case we get an error later */
	part->bam_index = 0xffff;

	if (ret) {
	    printk(KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
	    return ret;
	}
    }

    /* Write the LogicalEUN for the transfer unit */
    xfer->state = XFER_UNKNOWN;
    offset = xfer->Offset + 20; /* Bad! hard-coded offset of LogicalEUN in the unit header */
    unit = cpu_to_le16(0x7fff);

    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
			       &retlen, (u_char *) &unit);

    if (ret) {
	printk(KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
	return ret;
    }

    /* Copy all data blocks from source unit to transfer unit */
    src = eun->Offset; dest = xfer->Offset;

    free = 0;
    ret = 0;
    for (i = 0; i < part->BlocksPerUnit; i++) {
	switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
	case BLOCK_CONTROL:
	    /* This gets updated later */
	    break;
	case BLOCK_DATA:
	case BLOCK_REPLACEMENT:
	    ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
				      &retlen, (u_char *) buf);
	    if (ret) {
		printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
		return ret;
	    }

	    ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
				       &retlen, (u_char *) buf);
	    if (ret) {
		printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
		return ret;
	    }

	    break;
	default:
	    /* All other blocks must be free */
	    part->bam_cache[i] = cpu_to_le32(0xffffffff);
	    free++;
	    break;
	}
	src += SECTOR_SIZE;
	dest += SECTOR_SIZE;
    }

    /* Write the BAM to the transfer unit */
    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
			       part->BlocksPerUnit * sizeof(int32_t), &retlen,
			       (u_char *)part->bam_cache);
    if (ret) {
	printk(KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
	return ret;
    }

    /* All clear? Then update the LogicalEUN again */
    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
			       &retlen, (u_char *)&srcunitswap);

    if (ret) {
	printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
	return ret;
    }

    /* Update the maps and usage stats */
    i = xfer->EraseCount;
    xfer->EraseCount = eun->EraseCount;
    eun->EraseCount = i;
    i = xfer->Offset;
    xfer->Offset = eun->Offset;
    eun->Offset = i;
    part->FreeTotal -= eun->Free;
    part->FreeTotal += free;
    eun->Free = free;
    eun->Deleted = 0;

    /* Now, the cache should be valid for the new block */
    part->bam_index = srcunit;

    return 0;
} /* copy_erase_unit */

/*======================================================================

    reclaim_block() picks a full erase unit and a transfer unit and
    then calls copy_erase_unit() to copy one to the other. Then, it
    schedules an erase on the expired block.

    What's a good way to decide which transfer unit and which erase
    unit to use? Beats me. My way is to always pick the transfer
    unit with the fewest erases, and usually pick the data unit with
    the most deleted blocks. But with a small probability, pick the
    oldest data unit instead. This means that we generally postpone
    the next reclamation as long as possible, but shuffle static
    stuff around a bit for wear leveling.

======================================================================*/
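
/*
 * Note (added for clarity): the "small probability" above is implemented
 * below as (jiffies % shuffle_freq) == 0, so with the default
 * shuffle_freq of 50 roughly one reclaim in fifty recycles the
 * least-erased data unit instead of the most-deleted one.
 */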

static int reclaim_block(partition_t *part)
{
    u_int16_t i, eun, xfer;
    u_int32_t best;
    int queued, ret;

    DEBUG(0, "ftl_cs: reclaiming space...\n");
    DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
    /* Pick the least erased transfer unit */
    best = 0xffffffff; xfer = 0xffff;
    do {
	queued = 0;
	for (i = 0; i < part->header.NumTransferUnits; i++) {
	    int n = 0;
	    if (part->XferInfo[i].state == XFER_UNKNOWN) {
		DEBUG(3, "XferInfo[%d].state == XFER_UNKNOWN\n", i);
		n = 1;
		erase_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_ERASING) {
		DEBUG(3, "XferInfo[%d].state == XFER_ERASING\n", i);
		n = 1;
		queued = 1;
	    }
	    else if (part->XferInfo[i].state == XFER_ERASED) {
		DEBUG(3, "XferInfo[%d].state == XFER_ERASED\n", i);
		n = 1;
		prepare_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_PREPARED) {
		DEBUG(3, "XferInfo[%d].state == XFER_PREPARED\n", i);
		n = 1;
		if (part->XferInfo[i].EraseCount <= best) {
		    best = part->XferInfo[i].EraseCount;
		    xfer = i;
		}
	    }
	    if (!n)
		DEBUG(3, "XferInfo[%d].state == %x\n", i, part->XferInfo[i].state);
	}
	if (xfer == 0xffff) {
	    if (queued) {
		DEBUG(1, "ftl_cs: waiting for transfer "
		      "unit to be prepared...\n");
		if (part->mbd.mtd->sync)
		    part->mbd.mtd->sync(part->mbd.mtd);
	    } else {
		static int ne = 0;
		if (++ne < 5)
		    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
			   "suitable transfer units!\n");
		else
		    DEBUG(1, "ftl_cs: reclaim failed: no "
			  "suitable transfer units!\n");

		return -EIO;
	    }
	}
    } while (xfer == 0xffff);

    eun = 0;
    if ((jiffies % shuffle_freq) == 0) {
	DEBUG(1, "ftl_cs: recycling freshest block...\n");
	best = 0xffffffff;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].EraseCount <= best) {
		best = part->EUNInfo[i].EraseCount;
		eun = i;
	    }
    } else {
	best = 0;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].Deleted >= best) {
		best = part->EUNInfo[i].Deleted;
		eun = i;
	    }
	if (best == 0) {
	    static int ne = 0;
	    if (++ne < 5)
		printk(KERN_NOTICE "ftl_cs: reclaim failed: "
		       "no free blocks!\n");
	    else
		DEBUG(1, "ftl_cs: reclaim failed: "
		      "no free blocks!\n");

	    return -EIO;
	}
    }
    ret = copy_erase_unit(part, eun, xfer);
    if (!ret)
	erase_xfer(part, xfer);
    else
	printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
    return ret;
} /* reclaim_block */

/*======================================================================

    Find_free() searches for a free block. If necessary, it updates
    the BAM cache for the erase unit containing the free block. It
    returns the block index -- the erase unit is just the currently
    cached unit. If there are no free blocks, it returns 0 -- this
    is never a valid data block because it contains the header.

======================================================================*/

#ifdef PSYCHO_DEBUG
static void dump_lists(partition_t *part)
{
    int i;
    printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
    for (i = 0; i < part->DataUnits; i++)
	printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
	       "%d deleted\n", i,
	       part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
	       part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
}
#endif

static u_int32_t find_free(partition_t *part)
{
    u_int16_t stop, eun;
    u_int32_t blk;
    size_t retlen;
    int ret;

    /* Find an erase unit with some free space */
    stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
    eun = stop;
    do {
	if (part->EUNInfo[eun].Free != 0) break;
	/* Wrap around at end of table */
	if (++eun == part->DataUnits) eun = 0;
    } while (eun != stop);

    if (part->EUNInfo[eun].Free == 0)
	return 0;

    /* Is this unit's BAM cached? */
    if (eun != part->bam_index) {
	/* Invalidate cache */
	part->bam_index = 0xffff;

	ret = part->mbd.mtd->read(part->mbd.mtd,
				  part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
				  part->BlocksPerUnit * sizeof(u_int32_t),
				  &retlen, (u_char *) (part->bam_cache));

	if (ret) {
	    printk(KERN_WARNING "ftl: Error reading BAM in find_free\n");
	    return 0;
	}
	part->bam_index = eun;
    }

    /* Find a free block */
    for (blk = 0; blk < part->BlocksPerUnit; blk++)
	if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
    if (blk == part->BlocksPerUnit) {
#ifdef PSYCHO_DEBUG
	static int ne = 0;
	if (++ne == 1)
	    dump_lists(part);
#endif
	printk(KERN_NOTICE "ftl_cs: bad free list!\n");
	return 0;
    }
    DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
    return blk;

} /* find_free */

/*======================================================================

    Read a series of sectors from an FTL partition.

======================================================================*/

static int ftl_read(partition_t *part, caddr_t buffer,
		    u_long sector, u_long nblocks)
{
    u_int32_t log_addr, bsize;
    u_long i;
    int ret;
    size_t offset, retlen;

    DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
	  part, sector, nblocks);
    if (!(part->state & FTL_FORMATTED)) {
	printk(KERN_NOTICE "ftl_cs: bad partition\n");
	return -EIO;
    }
    bsize = 1 << part->header.EraseUnitSize;

    for (i = 0; i < nblocks; i++) {
	if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
	    printk(KERN_NOTICE "ftl_cs: bad read offset\n");
	    return -EIO;
	}
	log_addr = part->VirtualBlockMap[sector+i];
	if (log_addr == 0xffffffff)
	    memset(buffer, 0, SECTOR_SIZE);
	else {
	    offset = (part->EUNInfo[log_addr / bsize].Offset
		      + (log_addr % bsize));
	    ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
				      &retlen, (u_char *) buffer);

	    if (ret) {
		printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
		return ret;
	    }
	}
	buffer += SECTOR_SIZE;
    }
    return 0;
} /* ftl_read */

/*======================================================================

    Write a series of sectors to an FTL partition.

======================================================================*/
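
/*
 * Write-ordering note (added for clarity): for each sector, ftl_write()
 * first tags the chosen free BAM entry with 0xfffffffe (allocated but
 * not yet valid), then writes the data, then marks the old copy's BAM
 * entry deleted, and only at the end records the real virtual address
 * in the new entry -- see the "Only delete the old entry when the new
 * entry is ready" comment below.
 */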

static int set_bam_entry(partition_t *part, u_int32_t log_addr,
			 u_int32_t virt_addr)
{
    u_int32_t bsize, blk, le_virt_addr;
#ifdef PSYCHO_DEBUG
    u_int32_t old_addr;
#endif
    u_int16_t eun;
    int ret;
    size_t retlen, offset;

    DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
	  part, log_addr, virt_addr);
    bsize = 1 << part->header.EraseUnitSize;
    eun = log_addr / bsize;
    blk = (log_addr % bsize) / SECTOR_SIZE;
    offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
	      le32_to_cpu(part->header.BAMOffset));

#ifdef PSYCHO_DEBUG
    ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
			      &retlen, (u_char *)&old_addr);
    if (ret) {
	printk(KERN_WARNING "ftl: Error reading old_addr in set_bam_entry: %d\n", ret);
	return ret;
    }
    old_addr = le32_to_cpu(old_addr);

    if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
	((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
	(!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
	static int ne = 0;
	if (++ne < 5) {
	    printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
	    printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
		   ", new = 0x%x\n", log_addr, old_addr, virt_addr);
	}
	return -EIO;
    }
#endif
    le_virt_addr = cpu_to_le32(virt_addr);
    if (part->bam_index == eun) {
#ifdef PSYCHO_DEBUG
	if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
	    static int ne = 0;
	    if (++ne < 5) {
		printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
		       "inconsistency!\n");
		printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
		       " = 0x%x\n",
		       le32_to_cpu(part->bam_cache[blk]), old_addr);
	    }
	    return -EIO;
	}
#endif
	part->bam_cache[blk] = le_virt_addr;
    }
    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
			       &retlen, (u_char *)&le_virt_addr);

    if (ret) {
	printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
	printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
	       log_addr, virt_addr);
    }
    return ret;
} /* set_bam_entry */

static int ftl_write(partition_t *part, caddr_t buffer,
		     u_long sector, u_long nblocks)
{
    u_int32_t bsize, log_addr, virt_addr, old_addr, blk;
    u_long i;
    int ret;
    size_t retlen, offset;

    DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
	  part, sector, nblocks);
    if (!(part->state & FTL_FORMATTED)) {
	printk(KERN_NOTICE "ftl_cs: bad partition\n");
	return -EIO;
    }
    /* See if we need to reclaim space, before we start */
    while (part->FreeTotal < nblocks) {
	ret = reclaim_block(part);
	if (ret)
	    return ret;
    }

    bsize = 1 << part->header.EraseUnitSize;

    virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
    for (i = 0; i < nblocks; i++) {
	if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
	    printk(KERN_NOTICE "ftl_cs: bad write offset\n");
	    return -EIO;
	}

	/* Grab a free block */
	blk = find_free(part);
	if (blk == 0) {
	    static int ne = 0;
	    if (++ne < 5)
		printk(KERN_NOTICE "ftl_cs: internal error: "
		       "no free blocks!\n");
	    return -ENOSPC;
	}

	/* Tag the BAM entry, and write the new block */
	log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
	part->EUNInfo[part->bam_index].Free--;
	part->FreeTotal--;
	if (set_bam_entry(part, log_addr, 0xfffffffe))
	    return -EIO;
	part->EUNInfo[part->bam_index].Deleted++;
	offset = (part->EUNInfo[part->bam_index].Offset +
		  blk * SECTOR_SIZE);
	ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
				   buffer);

	if (ret) {
	    printk(KERN_NOTICE "ftl_cs: block write failed!\n");
	    printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
		   " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
		   offset);
	    return -EIO;
	}

	/* Only delete the old entry when the new entry is ready */
	old_addr = part->VirtualBlockMap[sector+i];
	if (old_addr != 0xffffffff) {
	    part->VirtualBlockMap[sector+i] = 0xffffffff;
	    part->EUNInfo[old_addr/bsize].Deleted++;
	    if (set_bam_entry(part, old_addr, 0))
		return -EIO;
	}

	/* Finally, set up the new pointers */
	if (set_bam_entry(part, log_addr, virt_addr))
	    return -EIO;
	part->VirtualBlockMap[sector+i] = log_addr;
	part->EUNInfo[part->bam_index].Deleted--;

	buffer += SECTOR_SIZE;
	virt_addr += SECTOR_SIZE;
    }
    return 0;
} /* ftl_write */

static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
    partition_t *part = (void *)dev;
    u_long sect;

    /* Sort of arbitrary: round size down to 4KiB boundary */
    sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;

    geo->heads = 1;
    geo->sectors = 8;
    geo->cylinders = sect >> 3;

    return 0;
}

static int ftl_readsect(struct mtd_blktrans_dev *dev,
			unsigned long block, char *buf)
{
    return ftl_read((void *)dev, buf, block, 1);
}

static int ftl_writesect(struct mtd_blktrans_dev *dev,
			 unsigned long block, char *buf)
{
    return ftl_write((void *)dev, buf, block, 1);
}

/*====================================================================*/

void ftl_freepart(partition_t *part)
{
    vfree(part->VirtualBlockMap);
    part->VirtualBlockMap = NULL;
    kfree(part->VirtualPageMap);
    part->VirtualPageMap = NULL;
    kfree(part->EUNInfo);
    part->EUNInfo = NULL;
    kfree(part->XferInfo);
    part->XferInfo = NULL;
    kfree(part->bam_cache);
    part->bam_cache = NULL;
} /* ftl_freepart */

static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
    partition_t *partition;

    partition = kzalloc(sizeof(partition_t), GFP_KERNEL);

    if (!partition) {
	printk(KERN_WARNING "No memory to scan for FTL on %s\n",
	       mtd->name);
	return;
    }

    partition->mbd.mtd = mtd;

    if ((scan_header(partition) == 0) &&
	(build_maps(partition) == 0)) {

	partition->state = FTL_FORMATTED;
#ifdef PCMCIA_DEBUG
	printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
	       le32_to_cpu(partition->header.FormattedSize) >> 10);
#endif
	partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;

	partition->mbd.tr = tr;
	partition->mbd.devnum = -1;
	if (!add_mtd_blktrans_dev((void *)partition))
	    return;
    }

    ftl_freepart(partition);
    kfree(partition);
}

static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
    del_mtd_blktrans_dev(dev);
    ftl_freepart((partition_t *)dev);
    kfree(dev);
}

struct mtd_blktrans_ops ftl_tr = {
    .name	= "ftl",
    .major	= FTL_MAJOR,
    .part_bits	= PART_BITS,
    .blksize	= SECTOR_SIZE,
    .readsect	= ftl_readsect,
    .writesect	= ftl_writesect,
    .getgeo	= ftl_getgeo,
    .add_mtd	= ftl_add_mtd,
    .remove_dev	= ftl_remove_dev,
    .owner	= THIS_MODULE,
};

static int init_ftl(void)
{
    DEBUG(0, "$Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $\n");

    return register_mtd_blktrans(&ftl_tr);
}

static void __exit cleanup_ftl(void)
{
    deregister_mtd_blktrans(&ftl_tr);
}

module_init(init_ftl);
module_exit(cleanup_ftl);

MODULE_LICENSE("Dual MPL/GPL");
MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");