MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / mtd / chips / cfi_cmdset_0001.c
blob b774bb91bf55333d2752d2c1d921d5f4d8d9b00f
1 /*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
9 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
42 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
43 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
45 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
46 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
47 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
48 static void cfi_intelext_sync (struct mtd_info *);
49 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
50 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
51 static int cfi_intelext_suspend (struct mtd_info *);
52 static void cfi_intelext_resume (struct mtd_info *);
54 static void cfi_intelext_destroy(struct mtd_info *);
56 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
58 static struct mtd_info *cfi_intelext_setup (struct map_info *);
59 static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);
61 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
62 size_t *retlen, u_char **mtdbuf);
63 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
64 size_t len);
68 * *********** SETUP AND PROBE BITS ***********
71 static struct mtd_chip_driver cfi_intelext_chipdrv = {
72 .probe = NULL, /* Not usable directly */
73 .destroy = cfi_intelext_destroy,
74 .name = "cfi_cmdset_0001",
75 .module = THIS_MODULE
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
81 #ifdef DEBUG_CFI_FEATURES
/* Pretty-print the Intel extended query (PRI) table so a human can see
 * what the chip claims to support.  Debug-only: compiled in only when
 * DEBUG_CFI_FEATURES is defined above.  Reads FeatureSupport,
 * SuspendCmdSupport, BlkStatusRegMask and the Vcc/Vpp optimum fields. */
82 static void cfi_tell_features(struct cfi_pri_intelext *extp)
84 int i;
/* FeatureSupport: one bit per optional capability (bits 0-9 known). */
85 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
86 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
87 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
88 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
89 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
90 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
91 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
92 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
93 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
94 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
95 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
/* Any feature bit above 9 is not yet known to this driver. */
96 for (i=10; i<32; i++) {
97 if (extp->FeatureSupport & (1<<i))
98 printk(" - Unknown Bit %X: supported\n", i);
101 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
102 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
103 for (i=1; i<8; i++) {
104 if (extp->SuspendCmdSupport & (1<<i))
105 printk(" - Unknown Bit %X: supported\n", i);
108 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
109 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
110 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
111 for (i=2; i<16; i++) {
112 if (extp->BlkStatusRegMask & (1<<i))
113 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages are encoded BCD-style: high nibble = volts, low = tenths. */
116 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
117 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
118 if (extp->VppOptimal)
119 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
120 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
122 #endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct map_info *map, void* param)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* BUG FIX: this is the Intel (0x0001) command-set driver, so
	 * cmdset_priv points at a cfi_pri_intelext, not the AMD-standard
	 * variant the old code declared.  Using cfi_pri_amdstd here only
	 * worked by accident of structure layout. */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	/* Clear bit 0: "program allowed while erase is suspended". */
	extp->SuspendCmdSupport &= ~1;
}
#endif
137 static void fixup_st_m28w320ct(struct map_info *map, void* param)
139 struct cfi_private *cfi = map->fldrv_priv;
141 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
142 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
145 static void fixup_st_m28w320cb(struct map_info *map, void* param)
147 struct cfi_private *cfi = map->fldrv_priv;
149 /* Note this is done after the region info is endian swapped */
150 cfi->cfiq->EraseRegionInfo[1] =
151 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
154 static struct cfi_fixup fixup_table[] = {
155 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
157 CFI_MFR_ANY, CFI_ID_ANY,
158 fixup_intel_strataflash, NULL
160 #endif
162 0x0020, /* STMicroelectronics */
163 0x00ba, /* M28W320CT */
164 fixup_st_m28w320ct, NULL
165 }, {
166 0x0020, /* STMicroelectronics */
167 0x00bb, /* M28W320CB */
168 fixup_st_m28w320cb, NULL
169 }, {
170 0, 0, NULL, NULL
174 /* This routine is made available to other mtd code via
175 * inter_module_register. It must only be accessed through
176 * inter_module_get which will bump the use count of this module. The
177 * addresses passed back in cfi are valid as long as the use count of
178 * this module is non-zero, i.e. between inter_module_get and
179 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/* Entry point for the Intel/Sharp command set.  Reads and byteswaps the
 * extended query (PRI) table for real CFI chips, applies per-chip
 * fixups, seeds per-chip timing estimates from the CFI timeout fields,
 * and hands off to cfi_intelext_setup() to build the mtd_info.
 * Returns the new mtd_info, or NULL on failure. */
181 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
183 struct cfi_private *cfi = map->fldrv_priv;
184 int i;
186 if (cfi->cfi_mode == CFI_MODE_CFI) {
188 * It's a real CFI chip, not one for which the probe
189 * routine faked a CFI structure. So we read the feature
190 * table from it.
/* primary selects which extended-query address (P_ADR/A_ADR) to use. */
192 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
193 struct cfi_pri_intelext *extp;
195 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
196 if (!extp)
197 return NULL;
199 /* Do some byteswapping if necessary */
200 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
201 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
202 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
204 /* Install our own private info structure */
205 cfi->cmdset_priv = extp;
207 cfi_fixup(map, fixup_table);
209 #ifdef DEBUG_CFI_FEATURES
210 /* Tell the user about it in lots of lovely detail */
211 cfi_tell_features(extp);
212 #endif
/* Bit 0 of SuspendCmdSupport = "program while erase suspended". */
214 if(extp->SuspendCmdSupport & 1) {
215 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
/* CFI timeout fields are log2 values; 1<<typ gives the typical time
 * (microseconds for writes, milliseconds for block erase). */
219 for (i=0; i< cfi->numchips; i++) {
220 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
221 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
222 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
223 cfi->chips[i].ref_point_counter = 0;
226 map->fldrv = &cfi_intelext_chipdrv;
228 return cfi_intelext_setup(map);
/* Build and populate the mtd_info for a probed Intel/Sharp chip set:
 * computes total size from DevSize and interleave, expands the CFI
 * erase-region table into one mtd_erase_region_info per region per
 * chip, wires up the method pointers, and finally runs the partition
 * fixup.  Returns the mtd_info, or NULL (after freeing everything
 * allocated here plus cmdset_priv) on any failure. */
231 static struct mtd_info *cfi_intelext_setup(struct map_info *map)
233 struct cfi_private *cfi = map->fldrv_priv;
234 struct mtd_info *mtd;
235 unsigned long offset = 0;
236 int i,j;
/* Bytes per interleaved chip set: 2^DevSize per chip * interleave. */
237 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
239 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
240 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
242 if (!mtd) {
243 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
244 goto setup_err;
247 memset(mtd, 0, sizeof(*mtd));
248 mtd->priv = map;
249 mtd->type = MTD_NORFLASH;
250 mtd->size = devsize * cfi->numchips;
252 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
253 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
254 * mtd->numeraseregions, GFP_KERNEL);
255 if (!mtd->eraseregions) {
256 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
257 goto setup_err;
/* EraseRegionInfo packs block size (x256 bytes) in the high 16 bits
 * and (block count - 1) in the low 16 bits. */
260 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
261 unsigned long ernum, ersize;
262 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
263 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize reports the largest region's block size. */
265 if (mtd->erasesize < ersize) {
266 mtd->erasesize = ersize;
/* Replicate this region's entry for every chip in the set. */
268 for (j=0; j<cfi->numchips; j++) {
269 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
270 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
271 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
273 offset += (ersize * ernum);
/* Sanity check: the regions must exactly tile the device. */
276 if (offset != devsize) {
277 /* Argh */
278 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
279 goto setup_err;
282 for (i=0; i<mtd->numeraseregions;i++){
283 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
284 i,mtd->eraseregions[i].offset,
285 mtd->eraseregions[i].erasesize,
286 mtd->eraseregions[i].numblocks);
289 /* Also select the correct geometry setup too */
290 mtd->erase = cfi_intelext_erase_varsize;
291 mtd->read = cfi_intelext_read;
/* point/unpoint (direct mapping) only work for linear maps. */
293 if (map_is_linear(map)) {
294 mtd->point = cfi_intelext_point;
295 mtd->unpoint = cfi_intelext_unpoint;
/* Prefer buffered writes when the chip supports them, unless the
 * FORCE_WORD_WRITE debug switch is set. */
298 if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
299 printk(KERN_INFO "Using buffer write method\n" );
300 mtd->write = cfi_intelext_write_buffers;
301 } else {
302 printk(KERN_INFO "Using word write method\n" );
303 mtd->write = cfi_intelext_write_words;
305 #if 0
306 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
307 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
308 #endif
309 mtd->sync = cfi_intelext_sync;
310 mtd->lock = cfi_intelext_lock;
311 mtd->unlock = cfi_intelext_unlock;
312 mtd->suspend = cfi_intelext_suspend;
313 mtd->resume = cfi_intelext_resume;
314 mtd->flags = MTD_CAP_NORFLASH;
315 map->fldrv = &cfi_intelext_chipdrv;
316 mtd->name = map->name;
318 /* This function has the potential to distort the reality
319 a bit and therefore should be called last. */
320 if (cfi_intelext_partition_fixup(map, &cfi) != 0)
321 goto setup_err;
323 __module_get(THIS_MODULE);
324 return mtd;
326 setup_err:
327 if(mtd) {
328 if(mtd->eraseregions)
329 kfree(mtd->eraseregions);
330 kfree(mtd);
/* cmdset_priv was allocated by cfi_read_pri() in cfi_cmdset_0001(). */
332 kfree(cfi->cmdset_priv);
333 return NULL;
/* If the chip advertises "simultaneous operations" (FeatureSupport bit
 * 9, assumed to mean an Intel L18-style multi-partition device), clone
 * the cfi_private so that each 8-Mbit partition gets its own flchip,
 * all partitions of one die sharing a flchip_shared arbiter.  The new
 * structure replaces *pcfi and map->fldrv_priv; the old one is freed.
 * Returns 0 on success or -ENOMEM. */
336 static int cfi_intelext_partition_fixup(struct map_info *map,
337 struct cfi_private **pcfi)
339 struct cfi_private *cfi = *pcfi;
340 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
343 * Probing of multi-partition flash ships.
345 * This is extremely crude at the moment and should probably be
346 * extracted entirely from the Intel extended query data instead.
347 * Right now a L18 flash is assumed if multiple operations is
348 * detected.
350 * To support multiple partitions when available, we simply arrange
351 * for each of them to have their own flchip structure even if they
352 * are on the same physical chip. This means completely recreating
353 * a new cfi_private structure right here which is a blatent code
354 * layering violation, but this is still the least intrusive
355 * arrangement at this point. This can be rearranged in the future
356 * if someone feels motivated enough. --nico
358 if (extp && extp->FeatureSupport & (1 << 9)) {
359 struct cfi_private *newcfi;
360 struct flchip *chip;
361 struct flchip_shared *shared;
362 int numparts, partshift, numvirtchips, i, j;
365 * The L18 flash memory array is divided
366 * into multiple 8-Mbit partitions.
/* 8 Mbit = 1 MiB = 2^20 bytes, hence the constant 20 below. */
368 numparts = 1 << (cfi->cfiq->DevSize - 20);
369 partshift = 20 + __ffs(cfi->interleave);
370 numvirtchips = cfi->numchips * numparts;
/* cfi_private has a flexible chips[] tail: allocate room for all
 * virtual chips in one block. */
372 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
373 if (!newcfi)
374 return -ENOMEM;
375 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
376 if (!shared) {
377 kfree(newcfi);
378 return -ENOMEM;
380 memcpy(newcfi, cfi, sizeof(struct cfi_private));
381 newcfi->numchips = numvirtchips;
382 newcfi->chipshift = partshift;
384 chip = &newcfi->chips[0];
385 for (i = 0; i < cfi->numchips; i++) {
386 shared[i].writing = shared[i].erasing = NULL;
387 spin_lock_init(&shared[i].lock);
388 for (j = 0; j < numparts; j++) {
389 *chip = cfi->chips[i];
390 chip->start += j << partshift;
391 chip->priv = &shared[i];
392 /* those should be reset too since
393 they create memory references. */
394 init_waitqueue_head(&chip->wq);
395 spin_lock_init(&chip->_spinlock);
396 chip->mutex = &chip->_spinlock;
397 chip++;
401 printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
402 "--> %d partitions of %#x bytes\n",
403 map->name, cfi->numchips, cfi->interleave,
404 newcfi->numchips, 1<<newcfi->chipshift);
406 map->fldrv_priv = newcfi;
407 *pcfi = newcfi;
408 kfree(cfi);
411 return 0;
415 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * get_chip() - wait until `chip` is available for operation `mode`
 * (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, ...).
 *
 * Called with chip->mutex held; may drop and retake it while waiting.
 * Returns 0 with the chip ready for the caller, or -EIO on hardware
 * timeout.  For partitioned devices (chip->priv set) it first wins
 * ownership of the per-die write/erase engine through flchip_shared,
 * possibly recursing into the partition that currently owns it.  If
 * the chip is mid-erase and the extended feature bits permit it, the
 * erase is transparently suspended so the caller can proceed.
 */
418 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
420 DECLARE_WAITQUEUE(wait, current);
421 struct cfi_private *cfi = map->fldrv_priv;
/* status_OK = SR.7 "ready" per interleaved device; status_PWS = SR.0,
 * partition-write-status on multi-partition parts. */
422 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
423 unsigned long timeo;
424 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
426 resettime:
427 timeo = jiffies + HZ;
428 retry:
429 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
431 * OK. We have possibility for contension on the write/erase
432 * operations which are global to the real chip and not per
433 * partition. So let's fight it over in the partition which
434 * currently has authority on the operation.
436 * The rules are as follows:
438 * - any write operation must own shared->writing.
440 * - any erase operation must own _both_ shared->writing and
441 * shared->erasing.
443 * - contension arbitration is handled in the owner's context.
445 * The 'shared' struct can be read when its lock is taken.
446 * However any writes to it can only be made when the current
447 * owner's lock is also held.
449 struct flchip_shared *shared = chip->priv;
450 struct flchip *contender;
451 spin_lock(&shared->lock);
452 contender = shared->writing;
453 if (contender && contender != chip) {
455 * The engine to perform desired operation on this
456 * partition is already in use by someone else.
457 * Let's fight over it in the context of the chip
458 * currently using it. If it is possible to suspend,
459 * that other partition will do just that, otherwise
460 * it'll happily send us to sleep. In any case, when
461 * get_chip returns success we're clear to go ahead.
/* trylock to avoid ABBA deadlock between the two chip mutexes. */
463 int ret = spin_trylock(contender->mutex);
464 spin_unlock(&shared->lock);
465 if (!ret)
466 goto retry;
467 spin_unlock(chip->mutex);
468 ret = get_chip(map, contender, contender->start, mode);
469 spin_lock(chip->mutex);
470 if (ret) {
471 spin_unlock(contender->mutex);
472 return ret;
474 timeo = jiffies + HZ;
475 spin_lock(&shared->lock);
478 /* We now own it */
479 shared->writing = chip;
480 if (mode == FL_ERASING)
481 shared->erasing = chip;
482 if (contender && contender != chip)
483 spin_unlock(contender->mutex);
484 spin_unlock(&shared->lock);
/* Decide what to do based on the chip's current software state. */
487 switch (chip->state) {
489 case FL_STATUS:
490 for (;;) {
491 status = map_read(map, adr);
492 if (map_word_andequal(map, status, status_OK, status_OK))
493 break;
495 /* At this point we're fine with write operations
496 in other partitions as they don't conflict. */
497 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
498 break;
500 if (time_after(jiffies, timeo)) {
501 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
502 status.x[0]);
503 return -EIO;
505 spin_unlock(chip->mutex);
506 cfi_udelay(1);
507 spin_lock(chip->mutex);
508 /* Someone else might have been playing with it. */
509 goto retry;
512 case FL_READY:
513 case FL_CFI_QUERY:
514 case FL_JEDEC_QUERY:
515 return 0;
517 case FL_ERASING:
/* Suspend the erase only if the chip supports erase suspend
 * (FeatureSupport bit 1) and the caller's mode is compatible. */
518 if (!(cfip->FeatureSupport & 2) ||
519 !(mode == FL_READY || mode == FL_POINT ||
520 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
521 goto sleep;
524 /* Erase suspend */
525 map_write(map, CMD(0xB0), adr);
527 /* If the flash has finished erasing, then 'erase suspend'
528 * appears to make some (28F320) flash devices switch to
529 * 'read' mode. Make sure that we switch to 'read status'
530 * mode so we get the right data. --rmk
532 map_write(map, CMD(0x70), adr);
533 chip->oldstate = FL_ERASING;
534 chip->state = FL_ERASE_SUSPENDING;
535 chip->erase_suspended = 1;
536 for (;;) {
537 status = map_read(map, adr);
538 if (map_word_andequal(map, status, status_OK, status_OK))
539 break;
541 if (time_after(jiffies, timeo)) {
542 /* Urgh. Resume and pretend we weren't here. */
543 map_write(map, CMD(0xd0), adr);
544 /* Make sure we're in 'read status' mode if it had finished */
545 map_write(map, CMD(0x70), adr);
546 chip->state = FL_ERASING;
547 chip->oldstate = FL_READY;
548 printk(KERN_ERR "Chip not ready after erase "
549 "suspended: status = 0x%lx\n", status.x[0]);
550 return -EIO;
553 spin_unlock(chip->mutex);
554 cfi_udelay(1);
555 spin_lock(chip->mutex);
556 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
557 So we can just loop here. */
559 chip->state = FL_STATUS;
560 return 0;
562 case FL_POINT:
563 /* Only if there's no operation suspended... */
564 if (mode == FL_READY && chip->oldstate == FL_READY)
565 return 0;
/* fallthrough */
567 default:
568 sleep:
/* Chip is busy in an incompatible way: sleep until put_chip()
 * wakes the queue, then re-evaluate from scratch. */
569 #if 1 // mask by Victor Yu. 05-14-2007
570 set_current_state(TASK_UNINTERRUPTIBLE);
571 #endif
572 add_wait_queue(&chip->wq, &wait);
573 spin_unlock(chip->mutex);
574 schedule();
575 remove_wait_queue(&chip->wq, &wait);
576 spin_lock(chip->mutex);
577 goto resettime;
/*
 * put_chip() - release a chip previously acquired with get_chip().
 *
 * Called with chip->mutex held.  For partitioned devices it hands the
 * write/erase engine back: either to the partition the erase was
 * "loaned" from (recursing into put_chip for it) or by clearing the
 * shared ownership fields.  If an erase was suspended by get_chip(),
 * it is resumed here (0xd0) before waking any sleepers on chip->wq.
 */
581 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
583 struct cfi_private *cfi = map->fldrv_priv;
585 if (chip->priv) {
586 struct flchip_shared *shared = chip->priv;
587 spin_lock(&shared->lock);
588 if (shared->writing == chip) {
589 /* We own the ability to write, but we're done */
590 shared->writing = shared->erasing;
591 if (shared->writing && shared->writing != chip) {
592 /* give back ownership to who we loaned it from */
593 struct flchip *loaner = shared->writing;
594 spin_lock(loaner->mutex);
595 spin_unlock(&shared->lock);
596 spin_unlock(chip->mutex);
597 put_chip(map, loaner, loaner->start);
598 spin_lock(chip->mutex);
599 spin_unlock(loaner->mutex);
600 } else {
601 if (chip->oldstate != FL_ERASING) {
602 shared->erasing = NULL;
603 if (chip->oldstate != FL_WRITING)
604 shared->writing = NULL;
606 spin_unlock(&shared->lock);
608 } else {
609 spin_unlock(&shared->lock);
/* Resume a suspended erase, or drop Vpp after a completed op. */
613 switch(chip->oldstate) {
614 case FL_ERASING:
615 chip->state = chip->oldstate;
616 /* What if one interleaved chip has finished and the
617 other hasn't? The old code would leave the finished
618 one in READY mode. That's bad, and caused -EROFS
619 errors to be returned from do_erase_oneblock because
620 that's the only bit it checked for at the time.
621 As the state machine appears to explicitly allow
622 sending the 0x70 (Read Status) command to an erasing
623 chip and expecting it to be ignored, that's what we
624 do. */
625 map_write(map, CMD(0xd0), adr);
626 map_write(map, CMD(0x70), adr);
627 chip->oldstate = FL_READY;
628 chip->state = FL_ERASING;
629 break;
631 case FL_READY:
632 case FL_STATUS:
633 case FL_JEDEC_QUERY:
634 /* We should really make set_vpp() count, rather than doing this */
635 DISABLE_VPP(map);
636 break;
637 default:
638 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
640 wake_up(&chip->wq);
643 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
645 unsigned long cmd_addr;
646 struct cfi_private *cfi = map->fldrv_priv;
647 int ret = 0;
649 adr += chip->start;
651 /* Ensure cmd read/writes are aligned. */
652 cmd_addr = adr & ~(map_bankwidth(map)-1);
654 spin_lock(chip->mutex);
656 ret = get_chip(map, chip, cmd_addr, FL_POINT);
658 if (!ret) {
659 if (chip->state != FL_POINT && chip->state != FL_READY)
660 map_write(map, CMD(0xff), cmd_addr);
662 chip->state = FL_POINT;
663 chip->ref_point_counter++;
665 spin_unlock(chip->mutex);
667 return ret;
670 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
672 struct map_info *map = mtd->priv;
673 struct cfi_private *cfi = map->fldrv_priv;
674 unsigned long ofs;
675 int chipnum;
676 int ret = 0;
678 if (!map->virt || (from + len > mtd->size))
679 return -EINVAL;
681 *mtdbuf = (void *)map->virt + from;
682 *retlen = 0;
684 /* Now lock the chip(s) to POINT state */
686 /* ofs: offset within the first chip that the first read should start */
687 chipnum = (from >> cfi->chipshift);
688 ofs = from - (chipnum << cfi->chipshift);
690 while (len) {
691 unsigned long thislen;
693 if (chipnum >= cfi->numchips)
694 break;
696 if ((len + ofs -1) >> cfi->chipshift)
697 thislen = (1<<cfi->chipshift) - ofs;
698 else
699 thislen = len;
701 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
702 if (ret)
703 break;
705 *retlen += thislen;
706 len -= thislen;
708 ofs = 0;
709 chipnum++;
711 return 0;
714 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
716 struct map_info *map = mtd->priv;
717 struct cfi_private *cfi = map->fldrv_priv;
718 unsigned long ofs;
719 int chipnum;
721 /* Now unlock the chip(s) POINT state */
723 /* ofs: offset within the first chip that the first read should start */
724 chipnum = (from >> cfi->chipshift);
725 ofs = from - (chipnum << cfi->chipshift);
727 while (len) {
728 unsigned long thislen;
729 struct flchip *chip;
731 chip = &cfi->chips[chipnum];
732 if (chipnum >= cfi->numchips)
733 break;
735 if ((len + ofs -1) >> cfi->chipshift)
736 thislen = (1<<cfi->chipshift) - ofs;
737 else
738 thislen = len;
740 spin_lock(chip->mutex);
741 if (chip->state == FL_POINT) {
742 chip->ref_point_counter--;
743 if(chip->ref_point_counter == 0)
744 chip->state = FL_READY;
745 } else
746 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
748 put_chip(map, chip, chip->start);
749 spin_unlock(chip->mutex);
751 len -= thislen;
752 ofs = 0;
753 chipnum++;
757 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
759 unsigned long cmd_addr;
760 struct cfi_private *cfi = map->fldrv_priv;
761 int ret;
763 adr += chip->start;
765 /* Ensure cmd read/writes are aligned. */
766 cmd_addr = adr & ~(map_bankwidth(map)-1);
768 spin_lock(chip->mutex);
769 ret = get_chip(map, chip, cmd_addr, FL_READY);
770 if (ret) {
771 spin_unlock(chip->mutex);
772 return ret;
775 if (chip->state != FL_POINT && chip->state != FL_READY) {
776 map_write(map, CMD(0xff), cmd_addr);
778 chip->state = FL_READY;
781 map_copy_from(map, buf, adr, len);
783 put_chip(map, chip, cmd_addr);
785 spin_unlock(chip->mutex);
786 return 0;
789 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
791 struct map_info *map = mtd->priv;
792 struct cfi_private *cfi = map->fldrv_priv;
793 unsigned long ofs;
794 int chipnum;
795 int ret = 0;
797 /* ofs: offset within the first chip that the first read should start */
798 chipnum = (from >> cfi->chipshift);
799 ofs = from - (chipnum << cfi->chipshift);
801 *retlen = 0;
803 while (len) {
804 unsigned long thislen;
806 if (chipnum >= cfi->numchips)
807 break;
809 if ((len + ofs -1) >> cfi->chipshift)
810 thislen = (1<<cfi->chipshift) - ofs;
811 else
812 thislen = len;
814 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
815 if (ret)
816 break;
818 *retlen += thislen;
819 len -= thislen;
820 buf += thislen;
822 ofs = 0;
823 chipnum++;
825 return ret;
827 #if 0
/* NOTE(review): this whole protection-register (OTP) read block is
 * compiled out, and the mtd hooks that would call it are likewise under
 * #if 0 in cfi_intelext_setup().  Kept byte-identical as dead code. */
/* Common worker: read `len` bytes from the protection-register space.
 * base_offst selects factory (0) vs user (past factory) registers;
 * reg_sz is the per-chip register size.  Returns the number of bytes
 * read, or the get_chip() error if nothing was read. */
828 static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
830 struct map_info *map = mtd->priv;
831 struct cfi_private *cfi = map->fldrv_priv;
832 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
833 struct flchip *chip;
834 int ofs_factor = cfi->interleave * cfi->device_type;
835 int count = len;
836 int chip_num, offst;
837 int ret;
839 chip_num = ((unsigned int)from/reg_sz);
840 offst = from - (reg_sz*chip_num)+base_offst;
842 while (count) {
843 /* Calculate which chip & protection register offset we need */
845 if (chip_num >= cfi->numchips)
846 goto out;
848 chip = &cfi->chips[chip_num];
850 spin_lock(chip->mutex);
851 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
852 if (ret) {
853 spin_unlock(chip->mutex);
854 return (len-count)?:ret;
/* 0x90 = Read Identifier/OTP command. */
857 if (chip->state != FL_JEDEC_QUERY) {
858 map_write(map, CMD(0x90), chip->start);
859 chip->state = FL_JEDEC_QUERY;
862 while (count && ((offst-base_offst) < reg_sz)) {
863 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
864 buf++;
865 offst++;
866 count--;
869 put_chip(map, chip, chip->start);
870 spin_unlock(chip->mutex);
872 /* Move on to the next chip */
873 chip_num++;
874 offst = base_offst;
877 out:
878 return len-count;
/* User OTP registers live after the factory ones in register space. */
881 static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
883 struct map_info *map = mtd->priv;
884 struct cfi_private *cfi = map->fldrv_priv;
885 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
886 int base_offst,reg_sz;
888 /* Check that we actually have some protection registers */
889 if(!(extp->FeatureSupport&64)){
890 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
891 return 0;
894 base_offst=(1<<extp->FactProtRegSize);
895 reg_sz=(1<<extp->UserProtRegSize);
897 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
/* Factory OTP registers start at offset 0 of register space. */
900 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
902 struct map_info *map = mtd->priv;
903 struct cfi_private *cfi = map->fldrv_priv;
904 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
905 int base_offst,reg_sz;
907 /* Check that we actually have some protection registers */
908 if(!(extp->FeatureSupport&64)){
909 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
910 return 0;
913 base_offst=0;
914 reg_sz=(1<<extp->FactProtRegSize);
916 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
918 #endif
/*
 * do_write_oneword() - program one bus-width word at absolute flash
 * offset adr (chip-relative on entry; chip->start is added here).
 *
 * Acquires the chip via get_chip(FL_WRITING), issues Word Program
 * (0x40) + data, sleeps the typical word-write time, then polls SR.7
 * for completion, tolerating a concurrent suspend of the write.  Also
 * adapts chip->word_write_time up/down based on how long the poll took.
 * Returns 0, -EIO on timeout, or -EROFS if SR.1 reports a locked block.
 */
920 static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
922 struct cfi_private *cfi = map->fldrv_priv;
923 map_word status, status_OK;
924 unsigned long timeo;
925 int z, ret=0;
927 adr += chip->start;
929 /* Let's determine this according to the interleave only once */
930 status_OK = CMD(0x80);
932 spin_lock(chip->mutex);
933 ret = get_chip(map, chip, adr, FL_WRITING);
934 if (ret) {
935 spin_unlock(chip->mutex);
936 return ret;
939 ENABLE_VPP(map);
/* 0x40 = Word Program setup; the next bus write is the data. */
940 map_write(map, CMD(0x40), adr);
941 map_write(map, datum, adr);
942 chip->state = FL_WRITING;
944 spin_unlock(chip->mutex);
945 INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
946 cfi_udelay(chip->word_write_time);
947 spin_lock(chip->mutex);
949 timeo = jiffies + (HZ/2);
/* z counts extra 1us polls; used below to tune word_write_time. */
950 z = 0;
951 for (;;) {
952 if (chip->state != FL_WRITING) {
953 /* Someone's suspended the write. Sleep */
954 DECLARE_WAITQUEUE(wait, current);
956 #if 1 // mask by Victor Yu. 05-14-2007
957 set_current_state(TASK_UNINTERRUPTIBLE);
958 #endif
959 add_wait_queue(&chip->wq, &wait);
960 spin_unlock(chip->mutex);
961 schedule();
962 remove_wait_queue(&chip->wq, &wait);
963 timeo = jiffies + (HZ / 2); /* FIXME */
964 spin_lock(chip->mutex);
965 continue;
/* SR.7 set on every interleaved device means programming done. */
968 status = map_read(map, adr);
969 if (map_word_andequal(map, status, status_OK, status_OK))
970 break;
972 /* OK Still waiting */
973 if (time_after(jiffies, timeo)) {
974 chip->state = FL_STATUS;
975 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
976 ret = -EIO;
977 goto out;
980 /* Latency issues. Drop the lock, wait a while and retry */
981 spin_unlock(chip->mutex);
982 z++;
983 cfi_udelay(1);
984 spin_lock(chip->mutex);
/* Adaptive timing: finished on the first check -> sleep less next
 * time (never below 1us); needed >1 extra poll -> sleep longer. */
986 if (!z) {
987 chip->word_write_time--;
988 if (!chip->word_write_time)
989 chip->word_write_time++;
991 if (z > 1)
992 chip->word_write_time++;
994 /* Done and happy. */
995 chip->state = FL_STATUS;
996 /* check for lock bit */
997 if (map_word_bitsset(map, status, CMD(0x02))) {
998 /* clear status */
999 map_write(map, CMD(0x50), adr);
1000 /* put back into read status register mode */
1001 map_write(map, CMD(0x70), adr);
1002 ret = -EROFS;
1004 out:
1005 put_chip(map, chip, adr);
1006 spin_unlock(chip->mutex);
1008 return ret;
/*
 * MTD write method (word-at-a-time fallback).  Handles an unaligned
 * head (read-modify-write padded with 0xff — programming a 1 bit is a
 * no-op on NOR), then full bus-width words, then an unaligned tail,
 * crossing chip boundaries as needed.  *retlen accumulates the bytes
 * written; returns 0 or the first do_write_oneword() error.
 */
1012 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1014 struct map_info *map = mtd->priv;
1015 struct cfi_private *cfi = map->fldrv_priv;
1016 int ret = 0;
1017 int chipnum;
1018 unsigned long ofs;
1020 *retlen = 0;
1021 if (!len)
1022 return 0;
1024 chipnum = to >> cfi->chipshift;
1025 ofs = to - (chipnum << cfi->chipshift);
1027 /* If it's not bus-aligned, do the first byte write */
1028 if (ofs & (map_bankwidth(map)-1)) {
1029 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1030 int gap = ofs - bus_ofs;
1031 int n;
1032 map_word datum;
1034 n = min_t(int, len, map_bankwidth(map)-gap);
/* Pad with all-ones so untouched bytes stay unchanged. */
1035 datum = map_word_ff(map);
1036 datum = map_word_load_partial(map, datum, buf, gap, n);
1038 ret = do_write_oneword(map, &cfi->chips[chipnum],
1039 bus_ofs, datum);
1040 if (ret)
1041 return ret;
1043 len -= n;
1044 ofs += n;
1045 buf += n;
1046 (*retlen) += n;
/* Move on to the next chip if we ran off the end of this one. */
1048 if (ofs >> cfi->chipshift) {
1049 chipnum ++;
1050 ofs = 0;
1051 if (chipnum == cfi->numchips)
1052 return 0;
/* Main loop: whole bus-width words. */
1056 while(len >= map_bankwidth(map)) {
1057 map_word datum = map_word_load(map, buf);
1059 ret = do_write_oneword(map, &cfi->chips[chipnum],
1060 ofs, datum);
1061 if (ret)
1062 return ret;
1064 ofs += map_bankwidth(map);
1065 buf += map_bankwidth(map);
1066 (*retlen) += map_bankwidth(map);
1067 len -= map_bankwidth(map);
1069 if (ofs >> cfi->chipshift) {
1070 chipnum ++;
1071 ofs = 0;
1072 if (chipnum == cfi->numchips)
1073 return 0;
/* Unaligned tail: same 0xff-padded partial-word trick as the head. */
1077 if (len & (map_bankwidth(map)-1)) {
1078 map_word datum;
1080 datum = map_word_ff(map);
1081 datum = map_word_load_partial(map, datum, buf, 0, len);
1083 ret = do_write_oneword(map, &cfi->chips[chipnum],
1084 ofs, datum);
1085 if (ret)
1086 return ret;
1088 (*retlen) += len;
1091 return 0;
/*
 * Program up to one write buffer's worth of data (len <= wbufsize) at 'adr'
 * on a single chip, using the Intel "Write to Buffer" (0xE8) command.
 * Sequence: acquire chip, clear stale error bits, issue 0xE8 until the chip
 * reports buffer availability, write the word count, the data, then 0xD0
 * (confirm) and poll the status register until done.
 * Caller (cfi_intelext_write_buffers) guarantees the range does not cross a
 * write-buffer boundary.  Returns 0, -EIO on timeout, or -EROFS if the
 * block is locked.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands go to the (aligned) start of the write buffer region */
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7 = WSM ready, replicated per chip */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);	/* Read Status Register */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {	/* SR.4 | SR.5 */
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		map_write(map, CMD(0x50), cmd_adr);	/* Clear Status Register */
		map_write(map, CMD(0x70), cmd_adr);
	}

	ENABLE_VPP(map);
	chip->state = FL_WRITING_TO_BUFFER;

	/* Issue 0xE8 and wait (bounded) for the buffer to become available */
	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Latency: drop the lock while busy-waiting */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);	/* trailing partial-word bytes */
	words = len / map_bankwidth(map);
	/* Count is (words - 1) encoded; a partial word counts as one more,
	   hence "- !bytes" */
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	/* Partial last word, padded with 0xFF so extra bytes stay erased */
	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);	/* confirm: start programming */
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	/* Sleep roughly the expected programming time before polling */
	cfi_udelay(chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;	/* now counts extra polling iterations, for time adaptation */
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
#if 1 // mask by Victor Yu. 05-14-2007
			set_current_state(TASK_UNINTERRUPTIBLE);
#endif
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock(chip->mutex);
	}
	/* Adapt buffer_write_time: finished on first poll -> shorten (but
	   never to zero); needed more than one extra poll -> lengthen */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {	/* SR.1: block locked */
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

 out:
	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
/*
 * Buffered-write entry point.  An unaligned head is delegated to
 * cfi_intelext_write_words(); the rest is written in chunks via
 * do_write_buffer(), each chunk clipped so it never crosses a
 * write-buffer (wbufsize) boundary or a chip boundary.
 * Returns 0 or a negative errno; *retlen accumulates bytes written.
 */
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		/* bytes up to the next bus-word boundary */
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		/* word-write path updates *retlen for us */
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}
/* Per-block worker invoked by cfi_intelext_varsize_frob(): 'adr' is the
   block address relative to the chip, 'len' the block size, 'thunk' an
   opaque argument passed through from the caller. */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

/*
 * Apply 'frob' to every erase block in [ofs, ofs+len), honouring
 * variable-size erase regions (mtd->eraseregions).  Validates that both
 * ends of the range are aligned to the erase size in effect at that
 * address, then walks block by block, advancing the region index and chip
 * number as boundaries are crossed.  Returns 0 or the first error from
 * 'frob' / -EINVAL on a bad range.
 */
static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	 */

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	 */

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	 */
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while(len) {
		unsigned long chipmask;
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		len -= size;

		/* Move to the next region when we pass its last block.
		   Comparison is done modulo the chip size since 'adr' is
		   chip-relative but region offsets are absolute. */
		chipmask = (1 << cfi->chipshift) - 1;
		if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}
/*
 * Erase a single block at chip-relative address 'adr' ('len' is its size,
 * used only for cache invalidation).  Issues the Block Erase sequence
 * (0x20, 0xD0), sleeps roughly half the expected erase time, then polls
 * the status register, coping with erase suspension by other threads.
 * On chip-reported failure the status bits are decoded into -EIO/-EROFS,
 * with up to 3 retries when the erase-error bit (SR.5) is set.
 */
static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
			     unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7 = WSM ready */

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);	/* Block Erase setup */
	map_write(map, CMD(0xD0), adr);	/* confirm */
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	msleep(chip->erase_time / 2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
#if 1 // mask by Victor Yu. 05-14-2007
			set_current_state(TASK_UNINTERRUPTIBLE);
#endif
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
			       adr, status.x[0], map_read(map, adr).x[0]);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			DISABLE_VPP(map);
			spin_unlock(chip->mutex);
			/* NOTE(review): returns without put_chip()/wake_up(),
			   unlike the normal exit path below — verify against
			   get_chip()'s expectations */
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
#if 1 // mask by Victor Yu. 05-14-2007
		set_current_state(TASK_UNINTERRUPTIBLE);
#endif
		schedule_timeout(1);
		spin_lock(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {	/* SR.1|SR.3|SR.4|SR.5 */
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			/* Merge per-chip status bytes into one value.
			   NOTE(review): the shift amount is constant across
			   both loops (cfi->device_type * 8), so only one
			   lane is actually folded in — looks dubious, but
			   matches the historical upstream code */
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			/* SR.4 and SR.5 together: bogus command sequence */
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase failed: retry up to 3 times */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);
	return ret;
}
1555 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1557 unsigned long ofs, len;
1558 int ret;
1560 ofs = instr->addr;
1561 len = instr->len;
1563 ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1564 if (ret)
1565 return ret;
1567 instr->state = MTD_ERASE_DONE;
1568 mtd_erase_callback(instr);
1570 return 0;
/*
 * Quiesce all chips: acquire each one in turn and park it in FL_SYNCING
 * so no new operations start.  If any get_chip() fails, the loop stops
 * (first loop runs while !ret) and the second loop releases the chips
 * already claimed.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
1613 #ifdef DEBUG_LOCK_BITS
1614 static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
1615 unsigned long adr, int len, void *thunk)
1617 struct cfi_private *cfi = map->fldrv_priv;
1618 int ofs_factor = cfi->interleave * cfi->device_type;
1620 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1621 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1622 adr, cfi_read_query(map, adr+(2*ofs_factor)));
1623 chip->state = FL_JEDEC_QUERY;
1624 return 0;
1626 #endif
/* thunk values selecting the operation performed by do_xxlock_oneblock() */
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

/*
 * Lock or unlock one block, selected by 'thunk'.  Issues the Set/Clear
 * Block Lock sequence (0x60 then 0x01 to lock, 0xD0 to unlock) and polls
 * the status register until the chip reports ready.
 * Returns 0 or -EIO on timeout.
 */
static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);	/* SR.7 = WSM ready */

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);	/* Block Lock setup */

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	/* NOTE(review): no set_current_state() before schedule_timeout(),
	   so the task stays TASK_RUNNING and this returns immediately —
	   effectively just a yield, not a 1s sleep.  Confirm intent. */
	schedule_timeout(HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
			       status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
1701 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1703 int ret;
1705 #ifdef DEBUG_LOCK_BITS
1706 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1707 __FUNCTION__, ofs, len);
1708 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1709 ofs, len, 0);
1710 #endif
1712 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1713 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1715 #ifdef DEBUG_LOCK_BITS
1716 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1717 __FUNCTION__, ret);
1718 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1719 ofs, len, 0);
1720 #endif
1722 return ret;
1725 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1727 int ret;
1729 #ifdef DEBUG_LOCK_BITS
1730 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1731 __FUNCTION__, ofs, len);
1732 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1733 ofs, len, 0);
1734 #endif
1736 ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1737 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1739 #ifdef DEBUG_LOCK_BITS
1740 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1741 __FUNCTION__, ret);
1742 cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1743 ofs, len, 0);
1744 #endif
1746 return ret;
/*
 * Power-management suspend: move every idle chip into FL_PM_SUSPENDED.
 * A chip that is busy (erasing, writing, ...) makes the whole suspend
 * fail with -EAGAIN, in which case every chip already suspended here is
 * restored to its previous state.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Only suspend if no operation is parked behind us
			   (oldstate == FL_READY means nothing suspended) */
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			}
			break;
		default:
			ret = -EAGAIN;
			/* fallthrough: already-suspended chips are fine */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
1806 static void cfi_intelext_resume(struct mtd_info *mtd)
1808 struct map_info *map = mtd->priv;
1809 struct cfi_private *cfi = map->fldrv_priv;
1810 int i;
1811 struct flchip *chip;
1813 for (i=0; i<cfi->numchips; i++) {
1815 chip = &cfi->chips[i];
1817 spin_lock(chip->mutex);
1819 /* Go to known state. Chip may have been power cycled */
1820 if (chip->state == FL_PM_SUSPENDED) {
1821 map_write(map, CMD(0xFF), cfi->chips[i].start);
1822 chip->state = FL_READY;
1823 wake_up(&chip->wq);
1826 spin_unlock(chip->mutex);
/*
 * Tear down driver-private state when the MTD device goes away.
 * Members of 'cfi' are freed before 'cfi' itself; the erase region
 * table hangs off the mtd_info and is freed last.
 */
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
/* Names under which this cmdset is looked up by the CFI probe code */
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

/*
 * Module init: register the probe entry point under both the 0x0001 and
 * 0x0003 names — both intentionally point at cfi_cmdset_0001, since the
 * 0003 command set is handled by the same code.
 */
int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

/* Module exit: drop both inter-module registrations. */
static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");