2 * Common Flash Interface support:
3 * ST Advanced Architecture Command Set (ID 0x0020)
5 * (C) 2000 Red Hat. GPL'd
7 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
8 * - completely revamped method functions so they are aware and
9 * independent of the flash geometry (buswidth, interleave, etc.)
10 * - scalability vs code size is completely set at compile-time
11 * (see include/linux/mtd/cfi.h for selection)
12 * - optimized write buffer method
13 * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
14 * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
15 * (command set 0x0020)
16 * - added a writev function
17 * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
18 * - Plugged memory leak in cfi_staa_writev().
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
27 #include <asm/byteorder.h>
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/cfi.h>
35 #include <linux/mtd/mtd.h>
38 static int cfi_staa_read(struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
39 static int cfi_staa_write_buffers(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
40 static int cfi_staa_writev(struct mtd_info
*mtd
, const struct kvec
*vecs
,
41 unsigned long count
, loff_t to
, size_t *retlen
);
42 static int cfi_staa_erase_varsize(struct mtd_info
*, struct erase_info
*);
43 static void cfi_staa_sync (struct mtd_info
*);
44 static int cfi_staa_lock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
);
45 static int cfi_staa_unlock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
);
46 static int cfi_staa_suspend (struct mtd_info
*);
47 static void cfi_staa_resume (struct mtd_info
*);
49 static void cfi_staa_destroy(struct mtd_info
*);
51 struct mtd_info
*cfi_cmdset_0020(struct map_info
*, int);
53 static struct mtd_info
*cfi_staa_setup (struct map_info
*);
55 static struct mtd_chip_driver cfi_staa_chipdrv
= {
56 .probe
= NULL
, /* Not usable directly */
57 .destroy
= cfi_staa_destroy
,
58 .name
= "cfi_cmdset_0020",
62 /* #define DEBUG_LOCK_BITS */
63 //#define DEBUG_CFI_FEATURES
#ifdef DEBUG_CFI_FEATURES
/* Dump the capability bits of the vendor extended query table to the log. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	/* NOTE(review): loop bounds reconstructed — only the body survived
	   extraction; upstream iterates the remaining SuspendCmdSupport bits. */
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
114 struct mtd_info
*cfi_cmdset_0020(struct map_info
*map
, int primary
)
116 struct cfi_private
*cfi
= map
->fldrv_priv
;
121 * It's a real CFI chip, not one for which the probe
122 * routine faked a CFI structure. So we read the feature
125 __u16 adr
= primary
?cfi
->cfiq
->P_ADR
:cfi
->cfiq
->A_ADR
;
126 struct cfi_pri_intelext
*extp
;
128 extp
= (struct cfi_pri_intelext
*)cfi_read_pri(map
, adr
, sizeof(*extp
), "ST Microelectronics");
132 if (extp
->MajorVersion
!= '1' ||
133 (extp
->MinorVersion
< '0' || extp
->MinorVersion
> '3')) {
134 printk(KERN_ERR
" Unknown ST Microelectronics"
135 " Extended Query version %c.%c.\n",
136 extp
->MajorVersion
, extp
->MinorVersion
);
141 /* Do some byteswapping if necessary */
142 extp
->FeatureSupport
= cfi32_to_cpu(extp
->FeatureSupport
);
143 extp
->BlkStatusRegMask
= cfi32_to_cpu(extp
->BlkStatusRegMask
);
145 #ifdef DEBUG_CFI_FEATURES
146 /* Tell the user about it in lots of lovely detail */
147 cfi_tell_features(extp
);
150 /* Install our own private info structure */
151 cfi
->cmdset_priv
= extp
;
154 for (i
=0; i
< cfi
->numchips
; i
++) {
155 cfi
->chips
[i
].word_write_time
= 128;
156 cfi
->chips
[i
].buffer_write_time
= 128;
157 cfi
->chips
[i
].erase_time
= 1024;
158 cfi
->chips
[i
].ref_point_counter
= 0;
159 init_waitqueue_head(&(cfi
->chips
[i
].wq
));
162 return cfi_staa_setup(map
);
164 EXPORT_SYMBOL_GPL(cfi_cmdset_0020
);
166 static struct mtd_info
*cfi_staa_setup(struct map_info
*map
)
168 struct cfi_private
*cfi
= map
->fldrv_priv
;
169 struct mtd_info
*mtd
;
170 unsigned long offset
= 0;
172 unsigned long devsize
= (1<<cfi
->cfiq
->DevSize
) * cfi
->interleave
;
174 mtd
= kzalloc(sizeof(*mtd
), GFP_KERNEL
);
175 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
178 printk(KERN_ERR
"Failed to allocate memory for MTD device\n");
179 kfree(cfi
->cmdset_priv
);
184 mtd
->type
= MTD_NORFLASH
;
185 mtd
->size
= devsize
* cfi
->numchips
;
187 mtd
->numeraseregions
= cfi
->cfiq
->NumEraseRegions
* cfi
->numchips
;
188 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
)
189 * mtd
->numeraseregions
, GFP_KERNEL
);
190 if (!mtd
->eraseregions
) {
191 printk(KERN_ERR
"Failed to allocate memory for MTD erase region info\n");
192 kfree(cfi
->cmdset_priv
);
197 for (i
=0; i
<cfi
->cfiq
->NumEraseRegions
; i
++) {
198 unsigned long ernum
, ersize
;
199 ersize
= ((cfi
->cfiq
->EraseRegionInfo
[i
] >> 8) & ~0xff) * cfi
->interleave
;
200 ernum
= (cfi
->cfiq
->EraseRegionInfo
[i
] & 0xffff) + 1;
202 if (mtd
->erasesize
< ersize
) {
203 mtd
->erasesize
= ersize
;
205 for (j
=0; j
<cfi
->numchips
; j
++) {
206 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].offset
= (j
*devsize
)+offset
;
207 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].erasesize
= ersize
;
208 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].numblocks
= ernum
;
210 offset
+= (ersize
* ernum
);
213 if (offset
!= devsize
) {
215 printk(KERN_WARNING
"Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset
, devsize
);
216 kfree(mtd
->eraseregions
);
217 kfree(cfi
->cmdset_priv
);
222 for (i
=0; i
<mtd
->numeraseregions
;i
++){
223 printk(KERN_DEBUG
"%d: offset=0x%llx,size=0x%x,blocks=%d\n",
224 i
, (unsigned long long)mtd
->eraseregions
[i
].offset
,
225 mtd
->eraseregions
[i
].erasesize
,
226 mtd
->eraseregions
[i
].numblocks
);
229 /* Also select the correct geometry setup too */
230 mtd
->erase
= cfi_staa_erase_varsize
;
231 mtd
->read
= cfi_staa_read
;
232 mtd
->write
= cfi_staa_write_buffers
;
233 mtd
->writev
= cfi_staa_writev
;
234 mtd
->sync
= cfi_staa_sync
;
235 mtd
->lock
= cfi_staa_lock
;
236 mtd
->unlock
= cfi_staa_unlock
;
237 mtd
->suspend
= cfi_staa_suspend
;
238 mtd
->resume
= cfi_staa_resume
;
239 mtd
->flags
= MTD_CAP_NORFLASH
& ~MTD_BIT_WRITEABLE
;
240 mtd
->writesize
= 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
241 map
->fldrv
= &cfi_staa_chipdrv
;
242 __module_get(THIS_MODULE
);
243 mtd
->name
= map
->name
;
248 static inline int do_read_onechip(struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
, u_char
*buf
)
250 map_word status
, status_OK
;
252 DECLARE_WAITQUEUE(wait
, current
);
254 unsigned long cmd_addr
;
255 struct cfi_private
*cfi
= map
->fldrv_priv
;
259 /* Ensure cmd read/writes are aligned. */
260 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
262 /* Let's determine this according to the interleave only once */
263 status_OK
= CMD(0x80);
265 timeo
= jiffies
+ HZ
;
267 mutex_lock(&chip
->mutex
);
269 /* Check that the chip's ready to talk to us.
270 * If it's in FL_ERASING state, suspend it and make it talk now.
272 switch (chip
->state
) {
274 if (!(((struct cfi_pri_intelext
*)cfi
->cmdset_priv
)->FeatureSupport
& 2))
275 goto sleep
; /* We don't support erase suspend */
277 map_write (map
, CMD(0xb0), cmd_addr
);
278 /* If the flash has finished erasing, then 'erase suspend'
279 * appears to make some (28F320) flash devices switch to
280 * 'read' mode. Make sure that we switch to 'read status'
281 * mode so we get the right data. --rmk
283 map_write(map
, CMD(0x70), cmd_addr
);
284 chip
->oldstate
= FL_ERASING
;
285 chip
->state
= FL_ERASE_SUSPENDING
;
286 // printk("Erase suspending at 0x%lx\n", cmd_addr);
288 status
= map_read(map
, cmd_addr
);
289 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
292 if (time_after(jiffies
, timeo
)) {
294 map_write(map
, CMD(0xd0), cmd_addr
);
295 /* make sure we're in 'read status' mode */
296 map_write(map
, CMD(0x70), cmd_addr
);
297 chip
->state
= FL_ERASING
;
298 mutex_unlock(&chip
->mutex
);
299 printk(KERN_ERR
"Chip not ready after erase "
300 "suspended: status = 0x%lx\n", status
.x
[0]);
304 mutex_unlock(&chip
->mutex
);
306 mutex_lock(&chip
->mutex
);
310 map_write(map
, CMD(0xff), cmd_addr
);
311 chip
->state
= FL_READY
;
324 map_write(map
, CMD(0x70), cmd_addr
);
325 chip
->state
= FL_STATUS
;
328 status
= map_read(map
, cmd_addr
);
329 if (map_word_andequal(map
, status
, status_OK
, status_OK
)) {
330 map_write(map
, CMD(0xff), cmd_addr
);
331 chip
->state
= FL_READY
;
335 /* Urgh. Chip not yet ready to talk to us. */
336 if (time_after(jiffies
, timeo
)) {
337 mutex_unlock(&chip
->mutex
);
338 printk(KERN_ERR
"waiting for chip to be ready timed out in read. WSM status = %lx\n", status
.x
[0]);
342 /* Latency issues. Drop the lock, wait a while and retry */
343 mutex_unlock(&chip
->mutex
);
349 /* Stick ourselves on a wait queue to be woken when
350 someone changes the status */
351 set_current_state(TASK_UNINTERRUPTIBLE
);
352 add_wait_queue(&chip
->wq
, &wait
);
353 mutex_unlock(&chip
->mutex
);
355 remove_wait_queue(&chip
->wq
, &wait
);
356 timeo
= jiffies
+ HZ
;
360 map_copy_from(map
, buf
, adr
, len
);
363 chip
->state
= chip
->oldstate
;
364 /* What if one interleaved chip has finished and the
365 other hasn't? The old code would leave the finished
366 one in READY mode. That's bad, and caused -EROFS
367 errors to be returned from do_erase_oneblock because
368 that's the only bit it checked for at the time.
369 As the state machine appears to explicitly allow
370 sending the 0x70 (Read Status) command to an erasing
371 chip and expecting it to be ignored, that's what we
373 map_write(map
, CMD(0xd0), cmd_addr
);
374 map_write(map
, CMD(0x70), cmd_addr
);
378 mutex_unlock(&chip
->mutex
);
382 static int cfi_staa_read (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
384 struct map_info
*map
= mtd
->priv
;
385 struct cfi_private
*cfi
= map
->fldrv_priv
;
390 /* ofs: offset within the first chip that the first read should start */
391 chipnum
= (from
>> cfi
->chipshift
);
392 ofs
= from
- (chipnum
<< cfi
->chipshift
);
397 unsigned long thislen
;
399 if (chipnum
>= cfi
->numchips
)
402 if ((len
+ ofs
-1) >> cfi
->chipshift
)
403 thislen
= (1<<cfi
->chipshift
) - ofs
;
407 ret
= do_read_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
, buf
);
421 static inline int do_write_buffer(struct map_info
*map
, struct flchip
*chip
,
422 unsigned long adr
, const u_char
*buf
, int len
)
424 struct cfi_private
*cfi
= map
->fldrv_priv
;
425 map_word status
, status_OK
;
426 unsigned long cmd_adr
, timeo
;
427 DECLARE_WAITQUEUE(wait
, current
);
430 /* M58LW064A requires bus alignment for buffer wriets -- saw */
431 if (adr
& (map_bankwidth(map
)-1))
434 wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
436 cmd_adr
= adr
& ~(wbufsize
-1);
438 /* Let's determine this according to the interleave only once */
439 status_OK
= CMD(0x80);
441 timeo
= jiffies
+ HZ
;
444 #ifdef DEBUG_CFI_FEATURES
445 printk("%s: chip->state[%d]\n", __func__
, chip
->state
);
447 mutex_lock(&chip
->mutex
);
449 /* Check that the chip's ready to talk to us.
450 * Later, we can actually think about interrupting it
451 * if it's in FL_ERASING state.
452 * Not just yet, though.
454 switch (chip
->state
) {
460 map_write(map
, CMD(0x70), cmd_adr
);
461 chip
->state
= FL_STATUS
;
462 #ifdef DEBUG_CFI_FEATURES
463 printk("%s: 1 status[%x]\n", __func__
, map_read(map
, cmd_adr
));
467 status
= map_read(map
, cmd_adr
);
468 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
470 /* Urgh. Chip not yet ready to talk to us. */
471 if (time_after(jiffies
, timeo
)) {
472 mutex_unlock(&chip
->mutex
);
473 printk(KERN_ERR
"waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
474 status
.x
[0], map_read(map
, cmd_adr
).x
[0]);
478 /* Latency issues. Drop the lock, wait a while and retry */
479 mutex_unlock(&chip
->mutex
);
484 /* Stick ourselves on a wait queue to be woken when
485 someone changes the status */
486 set_current_state(TASK_UNINTERRUPTIBLE
);
487 add_wait_queue(&chip
->wq
, &wait
);
488 mutex_unlock(&chip
->mutex
);
490 remove_wait_queue(&chip
->wq
, &wait
);
491 timeo
= jiffies
+ HZ
;
496 map_write(map
, CMD(0xe8), cmd_adr
);
497 chip
->state
= FL_WRITING_TO_BUFFER
;
501 status
= map_read(map
, cmd_adr
);
502 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
505 mutex_unlock(&chip
->mutex
);
507 mutex_lock(&chip
->mutex
);
510 /* Argh. Not ready for write to buffer */
512 map_write(map
, CMD(0x70), cmd_adr
);
513 chip
->state
= FL_STATUS
;
514 mutex_unlock(&chip
->mutex
);
515 printk(KERN_ERR
"Chip not ready for buffer write. Xstatus = %lx\n", status
.x
[0]);
520 /* Write length of data to come */
521 map_write(map
, CMD(len
/map_bankwidth(map
)-1), cmd_adr
);
525 z
+= map_bankwidth(map
), buf
+= map_bankwidth(map
)) {
527 d
= map_word_load(map
, buf
);
528 map_write(map
, d
, adr
+z
);
531 map_write(map
, CMD(0xd0), cmd_adr
);
532 chip
->state
= FL_WRITING
;
534 mutex_unlock(&chip
->mutex
);
535 cfi_udelay(chip
->buffer_write_time
);
536 mutex_lock(&chip
->mutex
);
538 timeo
= jiffies
+ (HZ
/2);
541 if (chip
->state
!= FL_WRITING
) {
542 /* Someone's suspended the write. Sleep */
543 set_current_state(TASK_UNINTERRUPTIBLE
);
544 add_wait_queue(&chip
->wq
, &wait
);
545 mutex_unlock(&chip
->mutex
);
547 remove_wait_queue(&chip
->wq
, &wait
);
548 timeo
= jiffies
+ (HZ
/ 2); /* FIXME */
549 mutex_lock(&chip
->mutex
);
553 status
= map_read(map
, cmd_adr
);
554 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
557 /* OK Still waiting */
558 if (time_after(jiffies
, timeo
)) {
560 map_write(map
, CMD(0x50), cmd_adr
);
561 /* put back into read status register mode */
562 map_write(map
, CMD(0x70), adr
);
563 chip
->state
= FL_STATUS
;
565 mutex_unlock(&chip
->mutex
);
566 printk(KERN_ERR
"waiting for chip to be ready timed out in bufwrite\n");
570 /* Latency issues. Drop the lock, wait a while and retry */
571 mutex_unlock(&chip
->mutex
);
574 mutex_lock(&chip
->mutex
);
577 chip
->buffer_write_time
--;
578 if (!chip
->buffer_write_time
)
579 chip
->buffer_write_time
++;
582 chip
->buffer_write_time
++;
584 /* Done and happy. */
586 chip
->state
= FL_STATUS
;
588 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
589 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
590 #ifdef DEBUG_CFI_FEATURES
591 printk("%s: 2 status[%lx]\n", __func__
, status
.x
[0]);
594 map_write(map
, CMD(0x50), cmd_adr
);
595 /* put back into read status register mode */
596 map_write(map
, CMD(0x70), adr
);
598 mutex_unlock(&chip
->mutex
);
599 return map_word_bitsset(map
, status
, CMD(0x02)) ? -EROFS
: -EIO
;
602 mutex_unlock(&chip
->mutex
);
607 static int cfi_staa_write_buffers (struct mtd_info
*mtd
, loff_t to
,
608 size_t len
, size_t *retlen
, const u_char
*buf
)
610 struct map_info
*map
= mtd
->priv
;
611 struct cfi_private
*cfi
= map
->fldrv_priv
;
612 int wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
621 chipnum
= to
>> cfi
->chipshift
;
622 ofs
= to
- (chipnum
<< cfi
->chipshift
);
624 #ifdef DEBUG_CFI_FEATURES
625 printk("%s: map_bankwidth(map)[%x]\n", __func__
, map_bankwidth(map
));
626 printk("%s: chipnum[%x] wbufsize[%x]\n", __func__
, chipnum
, wbufsize
);
627 printk("%s: ofs[%x] len[%x]\n", __func__
, ofs
, len
);
630 /* Write buffer is worth it only if more than one word to write... */
632 /* We must not cross write block boundaries */
633 int size
= wbufsize
- (ofs
& (wbufsize
-1));
638 ret
= do_write_buffer(map
, &cfi
->chips
[chipnum
],
648 if (ofs
>> cfi
->chipshift
) {
651 if (chipnum
== cfi
->numchips
)
660 * Writev for ECC-Flashes is a little more complicated. We need to maintain
661 * a small buffer for this.
662 * XXX: If the buffer size is not a multiple of 2, this will break
664 #define ECCBUF_SIZE (mtd->writesize)
665 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
666 #define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
668 cfi_staa_writev(struct mtd_info
*mtd
, const struct kvec
*vecs
,
669 unsigned long count
, loff_t to
, size_t *retlen
)
672 size_t totlen
= 0, thislen
;
678 /* We should fall back to a general writev implementation.
679 * Until that is written, just break.
683 buffer
= kmalloc(ECCBUF_SIZE
, GFP_KERNEL
);
687 for (i
=0; i
<count
; i
++) {
688 size_t elem_len
= vecs
[i
].iov_len
;
689 void *elem_base
= vecs
[i
].iov_base
;
690 if (!elem_len
) /* FIXME: Might be unnecessary. Check that */
692 if (buflen
) { /* cut off head */
693 if (buflen
+ elem_len
< ECCBUF_SIZE
) { /* just accumulate */
694 memcpy(buffer
+buflen
, elem_base
, elem_len
);
698 memcpy(buffer
+buflen
, elem_base
, ECCBUF_SIZE
-buflen
);
699 ret
= mtd
->write(mtd
, to
, ECCBUF_SIZE
, &thislen
, buffer
);
701 if (ret
|| thislen
!= ECCBUF_SIZE
)
703 elem_len
-= thislen
-buflen
;
704 elem_base
+= thislen
-buflen
;
707 if (ECCBUF_DIV(elem_len
)) { /* write clean aligned data */
708 ret
= mtd
->write(mtd
, to
, ECCBUF_DIV(elem_len
), &thislen
, elem_base
);
710 if (ret
|| thislen
!= ECCBUF_DIV(elem_len
))
714 buflen
= ECCBUF_MOD(elem_len
); /* cut off tail */
716 memset(buffer
, 0xff, ECCBUF_SIZE
);
717 memcpy(buffer
, elem_base
+ thislen
, buflen
);
720 if (buflen
) { /* flush last page, even if not full */
721 /* This is sometimes intended behaviour, really */
722 ret
= mtd
->write(mtd
, to
, buflen
, &thislen
, buffer
);
724 if (ret
|| thislen
!= ECCBUF_SIZE
)
735 static inline int do_erase_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
737 struct cfi_private
*cfi
= map
->fldrv_priv
;
738 map_word status
, status_OK
;
741 DECLARE_WAITQUEUE(wait
, current
);
746 /* Let's determine this according to the interleave only once */
747 status_OK
= CMD(0x80);
749 timeo
= jiffies
+ HZ
;
751 mutex_lock(&chip
->mutex
);
753 /* Check that the chip's ready to talk to us. */
754 switch (chip
->state
) {
758 map_write(map
, CMD(0x70), adr
);
759 chip
->state
= FL_STATUS
;
762 status
= map_read(map
, adr
);
763 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
766 /* Urgh. Chip not yet ready to talk to us. */
767 if (time_after(jiffies
, timeo
)) {
768 mutex_unlock(&chip
->mutex
);
769 printk(KERN_ERR
"waiting for chip to be ready timed out in erase\n");
773 /* Latency issues. Drop the lock, wait a while and retry */
774 mutex_unlock(&chip
->mutex
);
779 /* Stick ourselves on a wait queue to be woken when
780 someone changes the status */
781 set_current_state(TASK_UNINTERRUPTIBLE
);
782 add_wait_queue(&chip
->wq
, &wait
);
783 mutex_unlock(&chip
->mutex
);
785 remove_wait_queue(&chip
->wq
, &wait
);
786 timeo
= jiffies
+ HZ
;
791 /* Clear the status register first */
792 map_write(map
, CMD(0x50), adr
);
795 map_write(map
, CMD(0x20), adr
);
796 map_write(map
, CMD(0xD0), adr
);
797 chip
->state
= FL_ERASING
;
799 mutex_unlock(&chip
->mutex
);
801 mutex_lock(&chip
->mutex
);
803 /* FIXME. Use a timer to check this, and return immediately. */
804 /* Once the state machine's known to be working I'll do that */
806 timeo
= jiffies
+ (HZ
*20);
808 if (chip
->state
!= FL_ERASING
) {
809 /* Someone's suspended the erase. Sleep */
810 set_current_state(TASK_UNINTERRUPTIBLE
);
811 add_wait_queue(&chip
->wq
, &wait
);
812 mutex_unlock(&chip
->mutex
);
814 remove_wait_queue(&chip
->wq
, &wait
);
815 timeo
= jiffies
+ (HZ
*20); /* FIXME */
816 mutex_lock(&chip
->mutex
);
820 status
= map_read(map
, adr
);
821 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
824 /* OK Still waiting */
825 if (time_after(jiffies
, timeo
)) {
826 map_write(map
, CMD(0x70), adr
);
827 chip
->state
= FL_STATUS
;
828 printk(KERN_ERR
"waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
830 mutex_unlock(&chip
->mutex
);
834 /* Latency issues. Drop the lock, wait a while and retry */
835 mutex_unlock(&chip
->mutex
);
837 mutex_lock(&chip
->mutex
);
843 /* We've broken this before. It doesn't hurt to be safe */
844 map_write(map
, CMD(0x70), adr
);
845 chip
->state
= FL_STATUS
;
846 status
= map_read(map
, adr
);
848 /* check for lock bit */
849 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
850 unsigned char chipstatus
= status
.x
[0];
851 if (!map_word_equal(map
, status
, CMD(chipstatus
))) {
853 for (w
=0; w
<map_words(map
); w
++) {
854 for (i
= 0; i
<cfi_interleave(cfi
); i
++) {
855 chipstatus
|= status
.x
[w
] >> (cfi
->device_type
* 8);
858 printk(KERN_WARNING
"Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
859 status
.x
[0], chipstatus
);
861 /* Reset the error bits */
862 map_write(map
, CMD(0x50), adr
);
863 map_write(map
, CMD(0x70), adr
);
865 if ((chipstatus
& 0x30) == 0x30) {
866 printk(KERN_NOTICE
"Chip reports improper command sequence: status 0x%x\n", chipstatus
);
868 } else if (chipstatus
& 0x02) {
869 /* Protection bit set */
871 } else if (chipstatus
& 0x8) {
873 printk(KERN_WARNING
"Chip reports voltage low on erase: status 0x%x\n", chipstatus
);
875 } else if (chipstatus
& 0x20) {
877 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr
, chipstatus
);
878 timeo
= jiffies
+ HZ
;
879 chip
->state
= FL_STATUS
;
880 mutex_unlock(&chip
->mutex
);
883 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x\n", adr
, chipstatus
);
889 mutex_unlock(&chip
->mutex
);
893 static int cfi_staa_erase_varsize(struct mtd_info
*mtd
,
894 struct erase_info
*instr
)
895 { struct map_info
*map
= mtd
->priv
;
896 struct cfi_private
*cfi
= map
->fldrv_priv
;
897 unsigned long adr
, len
;
898 int chipnum
, ret
= 0;
900 struct mtd_erase_region_info
*regions
= mtd
->eraseregions
;
902 if (instr
->addr
> mtd
->size
)
905 if ((instr
->len
+ instr
->addr
) > mtd
->size
)
908 /* Check that both start and end of the requested erase are
909 * aligned with the erasesize at the appropriate addresses.
914 /* Skip all erase regions which are ended before the start of
915 the requested erase. Actually, to save on the calculations,
916 we skip to the first erase region which starts after the
917 start of the requested erase, and then go back one.
920 while (i
< mtd
->numeraseregions
&& instr
->addr
>= regions
[i
].offset
)
924 /* OK, now i is pointing at the erase region in which this
925 erase request starts. Check the start of the requested
926 erase range is aligned with the erase size which is in
930 if (instr
->addr
& (regions
[i
].erasesize
-1))
933 /* Remember the erase region we start on */
936 /* Next, check that the end of the requested erase is aligned
937 * with the erase region at that address.
940 while (i
<mtd
->numeraseregions
&& (instr
->addr
+ instr
->len
) >= regions
[i
].offset
)
943 /* As before, drop back one to point at the region in which
944 the address actually falls
948 if ((instr
->addr
+ instr
->len
) & (regions
[i
].erasesize
-1))
951 chipnum
= instr
->addr
>> cfi
->chipshift
;
952 adr
= instr
->addr
- (chipnum
<< cfi
->chipshift
);
958 ret
= do_erase_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
963 adr
+= regions
[i
].erasesize
;
964 len
-= regions
[i
].erasesize
;
966 if (adr
% (1<< cfi
->chipshift
) == (((unsigned long)regions
[i
].offset
+ (regions
[i
].erasesize
* regions
[i
].numblocks
)) %( 1<< cfi
->chipshift
)))
969 if (adr
>> cfi
->chipshift
) {
973 if (chipnum
>= cfi
->numchips
)
978 instr
->state
= MTD_ERASE_DONE
;
979 mtd_erase_callback(instr
);
984 static void cfi_staa_sync (struct mtd_info
*mtd
)
986 struct map_info
*map
= mtd
->priv
;
987 struct cfi_private
*cfi
= map
->fldrv_priv
;
991 DECLARE_WAITQUEUE(wait
, current
);
993 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
994 chip
= &cfi
->chips
[i
];
997 mutex_lock(&chip
->mutex
);
999 switch(chip
->state
) {
1003 case FL_JEDEC_QUERY
:
1004 chip
->oldstate
= chip
->state
;
1005 chip
->state
= FL_SYNCING
;
1006 /* No need to wake_up() on this state change -
1007 * as the whole point is that nobody can do anything
1008 * with the chip now anyway.
1011 mutex_unlock(&chip
->mutex
);
1015 /* Not an idle state */
1016 set_current_state(TASK_UNINTERRUPTIBLE
);
1017 add_wait_queue(&chip
->wq
, &wait
);
1019 mutex_unlock(&chip
->mutex
);
1021 remove_wait_queue(&chip
->wq
, &wait
);
1027 /* Unlock the chips again */
1029 for (i
--; i
>=0; i
--) {
1030 chip
= &cfi
->chips
[i
];
1032 mutex_lock(&chip
->mutex
);
1034 if (chip
->state
== FL_SYNCING
) {
1035 chip
->state
= chip
->oldstate
;
1038 mutex_unlock(&chip
->mutex
);
1042 static inline int do_lock_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
1044 struct cfi_private
*cfi
= map
->fldrv_priv
;
1045 map_word status
, status_OK
;
1046 unsigned long timeo
= jiffies
+ HZ
;
1047 DECLARE_WAITQUEUE(wait
, current
);
1051 /* Let's determine this according to the interleave only once */
1052 status_OK
= CMD(0x80);
1054 timeo
= jiffies
+ HZ
;
1056 mutex_lock(&chip
->mutex
);
1058 /* Check that the chip's ready to talk to us. */
1059 switch (chip
->state
) {
1061 case FL_JEDEC_QUERY
:
1063 map_write(map
, CMD(0x70), adr
);
1064 chip
->state
= FL_STATUS
;
1067 status
= map_read(map
, adr
);
1068 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1071 /* Urgh. Chip not yet ready to talk to us. */
1072 if (time_after(jiffies
, timeo
)) {
1073 mutex_unlock(&chip
->mutex
);
1074 printk(KERN_ERR
"waiting for chip to be ready timed out in lock\n");
1078 /* Latency issues. Drop the lock, wait a while and retry */
1079 mutex_unlock(&chip
->mutex
);
1084 /* Stick ourselves on a wait queue to be woken when
1085 someone changes the status */
1086 set_current_state(TASK_UNINTERRUPTIBLE
);
1087 add_wait_queue(&chip
->wq
, &wait
);
1088 mutex_unlock(&chip
->mutex
);
1090 remove_wait_queue(&chip
->wq
, &wait
);
1091 timeo
= jiffies
+ HZ
;
1096 map_write(map
, CMD(0x60), adr
);
1097 map_write(map
, CMD(0x01), adr
);
1098 chip
->state
= FL_LOCKING
;
1100 mutex_unlock(&chip
->mutex
);
1102 mutex_lock(&chip
->mutex
);
1104 /* FIXME. Use a timer to check this, and return immediately. */
1105 /* Once the state machine's known to be working I'll do that */
1107 timeo
= jiffies
+ (HZ
*2);
1110 status
= map_read(map
, adr
);
1111 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1114 /* OK Still waiting */
1115 if (time_after(jiffies
, timeo
)) {
1116 map_write(map
, CMD(0x70), adr
);
1117 chip
->state
= FL_STATUS
;
1118 printk(KERN_ERR
"waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
1120 mutex_unlock(&chip
->mutex
);
1124 /* Latency issues. Drop the lock, wait a while and retry */
1125 mutex_unlock(&chip
->mutex
);
1127 mutex_lock(&chip
->mutex
);
1130 /* Done and happy. */
1131 chip
->state
= FL_STATUS
;
1134 mutex_unlock(&chip
->mutex
);
1137 static int cfi_staa_lock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
)
1139 struct map_info
*map
= mtd
->priv
;
1140 struct cfi_private
*cfi
= map
->fldrv_priv
;
1142 int chipnum
, ret
= 0;
1143 #ifdef DEBUG_LOCK_BITS
1144 int ofs_factor
= cfi
->interleave
* cfi
->device_type
;
1147 if (ofs
& (mtd
->erasesize
- 1))
1150 if (len
& (mtd
->erasesize
-1))
1153 if ((len
+ ofs
) > mtd
->size
)
1156 chipnum
= ofs
>> cfi
->chipshift
;
1157 adr
= ofs
- (chipnum
<< cfi
->chipshift
);
1161 #ifdef DEBUG_LOCK_BITS
1162 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1163 printk("before lock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1164 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1167 ret
= do_lock_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
1169 #ifdef DEBUG_LOCK_BITS
1170 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1171 printk("after lock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1172 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1178 adr
+= mtd
->erasesize
;
1179 len
-= mtd
->erasesize
;
1181 if (adr
>> cfi
->chipshift
) {
1185 if (chipnum
>= cfi
->numchips
)
1191 static inline int do_unlock_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
1193 struct cfi_private
*cfi
= map
->fldrv_priv
;
1194 map_word status
, status_OK
;
1195 unsigned long timeo
= jiffies
+ HZ
;
1196 DECLARE_WAITQUEUE(wait
, current
);
1200 /* Let's determine this according to the interleave only once */
1201 status_OK
= CMD(0x80);
1203 timeo
= jiffies
+ HZ
;
1205 mutex_lock(&chip
->mutex
);
1207 /* Check that the chip's ready to talk to us. */
1208 switch (chip
->state
) {
1210 case FL_JEDEC_QUERY
:
1212 map_write(map
, CMD(0x70), adr
);
1213 chip
->state
= FL_STATUS
;
1216 status
= map_read(map
, adr
);
1217 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1220 /* Urgh. Chip not yet ready to talk to us. */
1221 if (time_after(jiffies
, timeo
)) {
1222 mutex_unlock(&chip
->mutex
);
1223 printk(KERN_ERR
"waiting for chip to be ready timed out in unlock\n");
1227 /* Latency issues. Drop the lock, wait a while and retry */
1228 mutex_unlock(&chip
->mutex
);
1233 /* Stick ourselves on a wait queue to be woken when
1234 someone changes the status */
1235 set_current_state(TASK_UNINTERRUPTIBLE
);
1236 add_wait_queue(&chip
->wq
, &wait
);
1237 mutex_unlock(&chip
->mutex
);
1239 remove_wait_queue(&chip
->wq
, &wait
);
1240 timeo
= jiffies
+ HZ
;
1245 map_write(map
, CMD(0x60), adr
);
1246 map_write(map
, CMD(0xD0), adr
);
1247 chip
->state
= FL_UNLOCKING
;
1249 mutex_unlock(&chip
->mutex
);
1251 mutex_lock(&chip
->mutex
);
1253 /* FIXME. Use a timer to check this, and return immediately. */
1254 /* Once the state machine's known to be working I'll do that */
1256 timeo
= jiffies
+ (HZ
*2);
1259 status
= map_read(map
, adr
);
1260 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1263 /* OK Still waiting */
1264 if (time_after(jiffies
, timeo
)) {
1265 map_write(map
, CMD(0x70), adr
);
1266 chip
->state
= FL_STATUS
;
1267 printk(KERN_ERR
"waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
1269 mutex_unlock(&chip
->mutex
);
1273 /* Latency issues. Drop the unlock, wait a while and retry */
1274 mutex_unlock(&chip
->mutex
);
1276 mutex_lock(&chip
->mutex
);
1279 /* Done and happy. */
1280 chip
->state
= FL_STATUS
;
1283 mutex_unlock(&chip
->mutex
);
1286 static int cfi_staa_unlock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
)
1288 struct map_info
*map
= mtd
->priv
;
1289 struct cfi_private
*cfi
= map
->fldrv_priv
;
1291 int chipnum
, ret
= 0;
1292 #ifdef DEBUG_LOCK_BITS
1293 int ofs_factor
= cfi
->interleave
* cfi
->device_type
;
1296 chipnum
= ofs
>> cfi
->chipshift
;
1297 adr
= ofs
- (chipnum
<< cfi
->chipshift
);
1299 #ifdef DEBUG_LOCK_BITS
1301 unsigned long temp_adr
= adr
;
1302 unsigned long temp_len
= len
;
1304 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1306 printk("before unlock %x: block status register is %x\n",temp_adr
,cfi_read_query(map
, temp_adr
+(2*ofs_factor
)));
1307 temp_adr
+= mtd
->erasesize
;
1308 temp_len
-= mtd
->erasesize
;
1310 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1314 ret
= do_unlock_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
1316 #ifdef DEBUG_LOCK_BITS
1317 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1318 printk("after unlock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1319 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1325 static int cfi_staa_suspend(struct mtd_info
*mtd
)
1327 struct map_info
*map
= mtd
->priv
;
1328 struct cfi_private
*cfi
= map
->fldrv_priv
;
1330 struct flchip
*chip
;
1333 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
1334 chip
= &cfi
->chips
[i
];
1336 mutex_lock(&chip
->mutex
);
1338 switch(chip
->state
) {
1342 case FL_JEDEC_QUERY
:
1343 chip
->oldstate
= chip
->state
;
1344 chip
->state
= FL_PM_SUSPENDED
;
1345 /* No need to wake_up() on this state change -
1346 * as the whole point is that nobody can do anything
1347 * with the chip now anyway.
1349 case FL_PM_SUSPENDED
:
1356 mutex_unlock(&chip
->mutex
);
1359 /* Unlock the chips again */
1362 for (i
--; i
>=0; i
--) {
1363 chip
= &cfi
->chips
[i
];
1365 mutex_lock(&chip
->mutex
);
1367 if (chip
->state
== FL_PM_SUSPENDED
) {
1368 /* No need to force it into a known state here,
1369 because we're returning failure, and it didn't
1371 chip
->state
= chip
->oldstate
;
1374 mutex_unlock(&chip
->mutex
);
1381 static void cfi_staa_resume(struct mtd_info
*mtd
)
1383 struct map_info
*map
= mtd
->priv
;
1384 struct cfi_private
*cfi
= map
->fldrv_priv
;
1386 struct flchip
*chip
;
1388 for (i
=0; i
<cfi
->numchips
; i
++) {
1390 chip
= &cfi
->chips
[i
];
1392 mutex_lock(&chip
->mutex
);
1394 /* Go to known state. Chip may have been power cycled */
1395 if (chip
->state
== FL_PM_SUSPENDED
) {
1396 map_write(map
, CMD(0xFF), 0);
1397 chip
->state
= FL_READY
;
1401 mutex_unlock(&chip
->mutex
);
1405 static void cfi_staa_destroy(struct mtd_info
*mtd
)
1407 struct map_info
*map
= mtd
->priv
;
1408 struct cfi_private
*cfi
= map
->fldrv_priv
;
1409 kfree(cfi
->cmdset_priv
);
1413 MODULE_LICENSE("GPL");