drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}
static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device ids are as
	 * well.  This table picks up all the cases where we
	 * know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
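
/*
 * A note on how these tables are consumed (a sketch of the mechanism, as
 * implemented by cfi_fixup() elsewhere in the MTD chip layer): each table
 * above is walked entry by entry, and every hook whose manufacturer and
 * device id match the probed chip is called with the entry's param
 * pointer; CFI_MFR_ANY and CFI_ID_ANY act as wildcards.  Ordering
 * therefore matters: earlier entries run before later ones.
 */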
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}
		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
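		/*
		 * Background: in the AMD/Fujitsu command set, most commands
		 * are prefixed by two "unlock" write cycles -- 0xAA written
		 * to address 0x555 and 0x55 written to address 0x2AA
		 * (bus-scaled by cfi_send_gen_cmd()).  Every
		 * cfi_send_gen_cmd(0xAA, ...)/cfi_send_gen_cmd(0x55, ...)
		 * pair later in this file goes through these two addresses.
		 */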
	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
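		/*
		 * Background: the CFI query stores these timeouts as base-2
		 * exponents, so 1<<n recovers the typical value -- per the
		 * CFI specification this should be microseconds for word
		 * and buffer writes and milliseconds for block erase.  The
		 * raw 1<<n value is kept here with the units implicit; the
		 * polling code applies its own scaling where it waits.
		 */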
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
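		/*
		 * Background: each EraseRegionInfo word packs two CFI
		 * fields -- bits 0-15 hold (number of blocks - 1) and bits
		 * 16-31 hold the block size in units of 256 bytes.  The
		 * shift-and-mask above is just (info >> 16) * 256, scaled
		 * by the interleave.
		 */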
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
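
/*
 * Background: the double read works because an AMD-style chip that is busy
 * programming or erasing toggles its DQ6 status bit on every read cycle,
 * so two back-to-back reads of the same address return different values
 * until the operation finishes and the array data becomes readable again.
 */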
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
		          && chip->in_progress_block_addr == adr)))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased.  However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
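		/*
		 * Background: 0xB0 is the command set's Erase Suspend
		 * command; 0x30, used below and in put_chip(), doubles as
		 * Erase Resume while an erase is suspended.  Both are
		 * written to an address within the block being erased.
		 */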
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */
#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
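		/*
		 * Background: 0xF0 is the command set's Reset command; it
		 * aborts any pending command sequence and returns the chip
		 * to array (read) mode.  The same write is used as the
		 * recovery action after failed programs/erases elsewhere
		 * in this file.
		 */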
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
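	/*
	 * Background: the unlock/unlock/0x88 sequence above switches in the
	 * "Secured Silicon" (SecSi) sector overlay on AMD-style parts, so
	 * the map_copy_from() reads the OTP/protection area rather than the
	 * normal array; unlock/unlock/0x90 followed by 0x00 appears to be
	 * the documented exit sequence that restores read mode.
	 */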
	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
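	/*
	 * Background: this is the standard four-cycle word program sequence
	 * -- two unlock writes (0xAA/0x55), the Program command (0xA0),
	 * then the datum written to the target address itself.  From here
	 * the chip programs autonomously and the loop below polls for
	 * completion via the toggle bits.
	 */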
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
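	/*
	 * Background: the full buffered-program sequence is unlock cycles,
	 * Write to Buffer (0x25) at the sector address, a word count of
	 * (words - 1), the data words, and finally this Program Buffer to
	 * Flash confirm (0x29).  A single command can program up to
	 * 2^MaxBufWriteSize bytes per chip, which is why buffered writes
	 * are preferred over word writes whenever the chip supports them.
	 */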
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* The write buffer is only worth it if there is more than one
	   word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
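	/*
	 * Background: chip erase is a six-cycle sequence -- unlock, unlock,
	 * Erase Setup (0x80), unlock, unlock, then Chip Erase (0x10) at the
	 * unlock address.  Sector erase (see do_erase_oneblock() below)
	 * differs only in the sixth cycle.
	 */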
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);
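	/*
	 * Background: same six-cycle sequence as chip erase, except the
	 * final cycle writes Sector Erase (0x30) to an address inside the
	 * block to be erased -- the address itself selects the sector.
	 */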
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);
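	/*
	 * Background (best-effort reading of this Atmel-specific path): the
	 * five cycles above mirror the generic erase setup, while the final
	 * 0x40 written to the sector address appears to act as Atmel's
	 * per-sector lock command for parts like the AT49BV6416; the unlock
	 * path below uses a single unlock cycle followed by 0x70.  This is
	 * not part of the generic 0002 command set.
	 */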
	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");