Add support for newer flash chips in latest RT-N66U hardware revision
[tomato.git] release/src-rt/linux/linux-2.6/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define MANUFACTURER_SAMSUNG	0x00EC
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
		if (extp->SiliconRevision >= 0x1C) {
			mtd->writesize = 512;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_INFO "Enabling Spansion 65nm mode, writesize = 512 bytes\n");
		}
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_STUPID_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  The same seems to hold for the device
	 * ids.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion < '0' || extp->MajorVersion > '3' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
			if (cfi->mfr == MANUFACTURER_SAMSUNG &&
			    ((extp->MajorVersion == '3' && extp->MinorVersion == '3') ||
			     (extp->MajorVersion == '0'))) {
				printk(KERN_NOTICE "  Newer Samsung Flash detected, "
				       "should be compatible with Amd/Fujitsu.\n");
				if (extp->MajorVersion == '0') {
					switch (cfi->id) {
					case 0x257e:
					case 0x22e2:
						extp->MajorVersion = '1';
						break;
					}
				}
			} else {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c.\n", extp->MajorVersion,
				       extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}
		}
		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
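		/*
		 * For reference: every AMD-style command begins with the
		 * two-cycle unlock handshake. A word program, for example,
		 * is issued as:
		 *
		 *	write 0xAA -> addr_unlock1 (0x555)
		 *	write 0x55 -> addr_unlock2 (0x2aa)
		 *	write 0xA0 -> addr_unlock1
		 *	write data -> target address
		 *
		 * cfi_send_gen_cmd() scales these addresses by device type
		 * and interleave; in x8-compatibility mode the byte-addressed
		 * equivalents are 0xaaa/0x555, which is what the fixup below
		 * installs.
		 */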
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X8) &&
		     (cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X16) &&
		     (cfi->cfiq->InterfaceDesc == 4))) {
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);
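	/*
	 * The CFI query encodes typical operation times as log2 values:
	 * microseconds for word/buffer writes, milliseconds for block
	 * erase, so "1 << value" below recovers the typical time. For
	 * example, WordWriteTimeoutTyp == 4 means a typical word write
	 * of 2^4 = 16 us.
	 */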
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}
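	/*
	 * Each EraseRegionInfo word packs two CFI fields: bits 0-15 hold
	 * the number of blocks in the region minus one, and bits 16-31
	 * hold the block size divided by 256. Hence "(info >> 8) & ~0xff"
	 * below yields the block size in bytes. Worked example: a region
	 * of 128 x 64KiB sectors is encoded as 0x0100007f, giving
	 * ersize = 0x10000 and ernum = 128.
	 */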
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif
	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
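/*
 * Background, for readers unfamiliar with AMD-style status polling: while
 * the embedded program/erase algorithm runs, these chips toggle DQ6 on
 * every consecutive read of a busy address (and DQ2 on reads within an
 * erase-suspended sector), so two back-to-back reads returning the same
 * value is the simplest possible "ready" test. This follows the
 * AMD/Spansion toggle-bit datasheet convention; the code above relies
 * only on the "no bits toggling" property.
 */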
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;
	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;
		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;
	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why no special care
 * is taken for the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always
 * happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */
#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
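	/*
	 * Worked example (values purely illustrative): with two 4MiB chips,
	 * chipshift is 22, so a read at from = 0x500000 resolves to
	 * chipnum = 1, ofs = 0x100000, and the loop below clips each
	 * transfer at the chip boundary before moving on to the next chip.
	 */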
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}
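/*
 * For orientation: the AA/55/88 cycle above is the AMD "Enter SecSi
 * Sector" command, which overlays the one-time-programmable SecSi region
 * on the normal address space; the trailing AA/55/90 followed by 00 is
 * the exit sequence returning the chip to array read mode (per the
 * AMD/Spansion command definitions).
 */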
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
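	/*
	 * Arithmetic check of the above: with HZ = 100, HZ / 1000 == 0 in
	 * integer division, so the + 1 floors the timeout at one jiffy
	 * (10 ms); with HZ = 1000 the result is 2 jiffies (2 ms).
	 */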
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;
	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
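	/*
	 * The three cycles below form the standard AMD word-program
	 * command: unlock (0xAA, 0x55), program setup (0xA0), then the
	 * datum itself written to the target address. NOR programming can
	 * only clear 1 bits to 0; restoring 0 -> 1 requires a sector erase.
	 */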
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}
	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout, 20ms */
	unsigned long uWriteTimeout = ( HZ / 50 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words, prolog, epilog, buflen = len;
	map_word datum, pdat, edat;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
	/* If start is not bus-aligned, prepend old contents of flash */
	prolog = (adr & (map_bankwidth(map)-1));
	if (prolog) {
		adr -= prolog;
		cmd_adr -= prolog;
		len += prolog;
		pdat = map_read(map, adr);
	}
	/* If end is not bus-aligned, append old contents of flash */
	epilog = ((adr + len) & (map_bankwidth(map)-1));
	if (epilog) {
		len += map_bankwidth(map)-epilog;
		edat = map_read(map, adr + len - map_bankwidth(map));
	}
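	/*
	 * Command flow from here on (standard AMD/Spansion write-buffer
	 * programming): unlock (0xAA/0x55), Write Buffer Load (0x25) at the
	 * sector address, a word count encoded as count minus one, the data
	 * words themselves, and finally Write Buffer Program Confirm (0x29)
	 * to start the embedded algorithm. The prolog/epilog reads above
	 * pad an unaligned request with the flash's current contents so
	 * that only whole bus words are ever loaded.
	 */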
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);
	(void) map_read(map, cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	if (prolog) {
		datum = map_word_load_partial(map, pdat, buf, prolog,
					      min_t(int, buflen, map_bankwidth(map) - prolog));
		map_write(map, datum, adr);

		z += map_bankwidth(map);
		buf += map_bankwidth(map) - prolog;
	}
	while(z < words * map_bankwidth(map)) {
		if (epilog && z >= (words-1) * map_bankwidth(map))
			datum = map_word_load_partial(map, edat, buf, 0, epilog);
		else
			datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;
	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	xip_enable(map, chip, adr);

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
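	/*
	 * Illustration: CFI reports the buffer size as a power of two, so
	 * MaxBufWriteSize == 5 on a non-interleaved map means a 32-byte
	 * write buffer. The "wbufsize - (ofs & (wbufsize-1))" term below
	 * then clips each chunk so that no single buffer program crosses
	 * a write-buffer boundary, as the chips require.
	 */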
	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
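	/*
	 * The six cycles above are the standard AMD chip-erase command:
	 * unlock, erase setup (0x80), a second unlock, then 0x10 for
	 * "erase entire chip". do_erase_oneblock() below uses the same
	 * prefix but finishes with 0x30 at the sector address instead.
	 */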
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);
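	/*
	 * 0x30 written to the sector address selects sector erase (the
	 * address, not the data, picks the sector). Datasheets note that
	 * further sectors can be queued by writing additional 0x30 cycles
	 * within a short window; this driver erases strictly one block
	 * per call.
	 */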
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
1887 MODULE_LICENSE("GPL");
1888 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1889 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");