/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
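/*
 * For reference: the TopBottom values assigned above follow the extended
 * query encoding also listed in cfi_tell_features(): 2 means bottom boot,
 * 3 means top boot, and 4/5 mean uniform with bottom/top write protection.
 */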
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}
static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. It looks like the device IDs are as
	 * well. This table picks all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
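/*
 * How the tables above are consumed: cfi_fixup() (in cfi_util.c) walks a
 * table and, for each entry whose mfr/id fields match the probed chip
 * (CFI_MFR_ANY and CFI_ID_ANY acting as wildcards), calls the fixup hook
 * with the mtd as argument. So, for instance, the cfi_fixup(mtd,
 * cfi_fixup_table) call in cfi_cmdset_0002() below only runs
 * fixup_s29gl064n_sectors for AMD/Spansion chips reporting device ID
 * 0x0c01 or 0x1301.
 */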
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}
static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}
/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}
/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->flags    = MTD_CAP_NORFLASH;
	mtd->name     = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
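/*
 * Worked example of the toggle-bit scheme used by chip_ready(): while a
 * program or erase is in progress, AMD-style chips toggle DQ6 (and DQ2
 * during erase) on every read, so the two back-to-back reads above differ,
 * typically in bit 0x40. Once the operation completes, the chip returns to
 * array mode and both reads yield the same stored data.
 */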
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
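/*
 * Note on the resume above: on these chips the Erase Resume command shares
 * its code with sector erase (cfi->sector_erase_cmd, 0x30 by default, 0x50
 * on the SST parts fixed up earlier), while 0xB0, written in get_chip(),
 * is Erase Suspend.
 */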
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */
#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
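/*
 * The chipnum/ofs split above is plain shift arithmetic. For example, with
 * two chips of 4MiB each (cfi->chipshift == 22), a read starting at
 * from == 0x500000 lands in chip 1 at offset 0x100000, and a request that
 * would run past the end of a chip is clipped to the chip boundary and
 * continued on the next chip.
 */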
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip has no field for the
	 * maximum timeout, only for the typical one, which can be far too
	 * short depending on the conditions. The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
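/*
 * For reference, the command cycle issued by do_write_oneword() is the
 * standard AMD/Fujitsu single-word program sequence, relative to the chip
 * base and the unlock addresses chosen at probe time (usually 0x555/0x2AA):
 *
 *	addr_unlock1 <- 0xAA	(unlock cycle 1)
 *	addr_unlock2 <- 0x55	(unlock cycle 2)
 *	addr_unlock1 <- 0xA0	(program setup)
 *	adr          <- datum	(the actual data write)
 *
 * after which the chip goes busy and is polled with chip_ready()/chip_good().
 */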
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence. Since the last part
	 * of the sequence also works as a normal reset, we can run
	 * the same commands regardless of why we are here.
	 * See e.g.
	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
	       __func__, adr);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
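/*
 * Summary of the buffered-program protocol driven by do_write_buffer():
 * after the two unlock cycles, 0x25 (Write Buffer Load) is written to the
 * sector address, then a word count of (words - 1), then the data words
 * themselves, and finally 0x29 (Write Buffer Program Confirm) starts the
 * actual programming. A timeout mid-sequence requires the
 * write-to-buffer-reset sequence issued above before the chip accepts new
 * commands.
 */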
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, adr))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, adr))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* the chip never became ready */
	return -EBUSY;
}
/*
 * Write out one word of data to a single flash chip during a kernel panic
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret = 0;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
			__func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, adr))
			break;

		udelay(1);
	}

	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}

 op_done:
	DISABLE_VPP(map);
	return ret;
}
1784 * Write out some data during a kernel panic
1786 * This is used by the mtdoops driver to save the dying messages from a
1787 * kernel which has panic'd.
1789 * This routine ignores all of the locking used throughout the rest of the
1790 * driver, in order to ensure that the data gets written out no matter what
1791 * state this driver (and the flash chip itself) was in when the kernel crashed.
1793 * The implementation of this routine is intentionally similar to
1794 * cfi_amdstd_write_words(), in order to ease code maintenance.
1796 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1797 size_t *retlen, const u_char *buf)
1799 struct map_info *map = mtd->priv;
1800 struct cfi_private *cfi = map->fldrv_priv;
1801 unsigned long ofs, chipstart;
1802 int ret = 0;
1803 int chipnum;
1805 chipnum = to >> cfi->chipshift;
1806 ofs = to - (chipnum << cfi->chipshift);
1807 chipstart = cfi->chips[chipnum].start;
1809 /* If it's not bus aligned, do the first byte write */
1810 if (ofs & (map_bankwidth(map) - 1)) {
1811 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1812 int i = ofs - bus_ofs;
1813 int n = 0;
1814 map_word tmp_buf;
1816 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1817 if (ret)
1818 return ret;
1820 /* Load 'tmp_buf' with old contents of flash */
1821 tmp_buf = map_read(map, bus_ofs + chipstart);
1823 /* Number of bytes to copy from buffer */
1824 n = min_t(int, len, map_bankwidth(map) - i);
1826 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1828 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1829 bus_ofs, tmp_buf);
1830 if (ret)
1831 return ret;
1833 ofs += n;
1834 buf += n;
1835 (*retlen) += n;
1836 len -= n;
1838 if (ofs >> cfi->chipshift) {
1839 chipnum++;
1840 ofs = 0;
1841 if (chipnum == cfi->numchips)
1842 return 0;
	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

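/*
 * Usage sketch (assumption: generic MTD client code, not taken from this
 * file): mtdoops reaches this handler through the mtd_panic_write() wrapper,
 * roughly
 *
 *	size_t retlen;
 *	int err = mtd_panic_write(mtd, ofs, record_size, &retlen, buf);
 *
 * where 'ofs', 'record_size' and 'buf' stand in for the caller's state, and
 * the wrapper fails with -EOPNOTSUPP when a chip driver registers no
 * _panic_write handler.
 */
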
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

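	/*
	 * Background note: the six cycles above form the standard AMD
	 * chip-erase sequence - two unlock cycles (AA/55), erase setup (80),
	 * two more unlock cycles, then 10h for "erase entire chip".  The
	 * sector-erase path in do_erase_oneblock() below differs only in the
	 * sixth cycle, which writes cfi->sector_erase_cmd (usually 30h) to
	 * the sector address instead of the unlock address.
	 */
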
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

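/*
 * Usage sketch (assumption: generic MTD client code, not taken from this
 * file): callers do not invoke these handlers directly but go through
 * mtd_erase() with a filled-in erase_info, e.g.
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 * Completion is reported through ei.state (MTD_ERASE_DONE on success) and
 * the optional ei.callback, as set at the end of the handlers above.
 */
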
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

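/*
 * Background sketch: cfi_varsize_frob(), defined in cfi_util.c, walks every
 * erase block in [ofs, ofs + len) and invokes the supplied per-block callback
 * with the chip and the block address; the void *thunk is passed through
 * untouched, which is how do_ppb_xxlock() below distinguishes lock, unlock
 * and get-lock requests.  A per-block callback therefore has the shape
 *
 *	static int my_frob(struct map_info *map, struct flchip *chip,
 *			   unsigned long adr, int len, void *thunk)
 *	{
 *		// operate on the single erase block at 'adr'
 *		return 0;
 *	}
 *
 * where 'my_frob' is a hypothetical name used for illustration only.
 */
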
/*
 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
 */

struct ppb_lock {
	struct flchip *chip;
	loff_t offset;
	int locked;
};

#define MAX_SECTORS			512

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)

static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		map_write(map, CMD(0xA0), chip->start + adr);
		map_write(map, CMD(0x00), chip->start + adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/* Return locked status: 0->locked, 1->unlocked */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for some time as unlocking of all sectors takes quite long
	 */
	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
	for (;;) {
		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Exit BC commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

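/*
 * Background note: PPB bits are non-volatile ("persistent"), so the lock
 * state programmed here survives a power cycle.  And because the command set
 * only offers an "unlock all" operation, cfi_ppb_unlock() below must record
 * the lock state of every sector first and re-lock the untouched ones
 * afterwards.
 */
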
static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
				       uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_LOCK);
}

static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
					 uint64_t len)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct ppb_lock *sect;
	unsigned long adr;
	loff_t offset;
	uint64_t length;
	int chipnum;
	int i;
	int sectors;
	int ret;

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors.  So let's
	 * first check the locking status of all sectors and save
	 * it for future use.
	 */
	sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
	if (!sect)
		return -ENOMEM;

	/*
	 * This code to walk all sectors is a slightly modified version
	 * of the cfi_varsize_frob() code.
	 */
	i = 0;
	chipnum = 0;
	adr = 0;
	sectors = 0;
	offset = 0;
	length = mtd->size;

	while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only test sectors that shall not be unlocked. The other
		 * sectors shall be unlocked, so let's keep their locking
		 * status at "unlocked" (locked=0) for the final re-locking.
		 */
		if ((adr < ofs) || (adr >= (ofs + len))) {
			sect[sectors].chip = &cfi->chips[chipnum];
			sect[sectors].offset = offset;
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
		}

		adr += size;
		offset += size;
		length -= size;

		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}

		sectors++;
		if (sectors >= MAX_SECTORS) {
			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
			       MAX_SECTORS);
			kfree(sect);
			return -EINVAL;
		}
	}

	/* Now unlock the whole chip */
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
		return ret;
	}

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors.
	 */
	for (i = 0; i < sectors; i++) {
		if (sect[i].locked)
			do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
	}

	kfree(sect);
	return ret;
}

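/*
 * Usage sketch (assumption: generic MTD client code): the PPB handlers are
 * reached through the standard MTD protection API, e.g.
 *
 *	mtd_lock(mtd, ofs, len);
 *	mtd_unlock(mtd, ofs, len);	// unlock all, then re-lock the rest
 *	mtd_is_locked(mtd, ofs, len);	// 1 if any block in range is locked
 *
 * provided the probe code wired these handlers into mtd->_lock, mtd->_unlock
 * and mtd->_is_locked.
 */
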
static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
					    uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}

static void cfi_amdstd_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		} else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}

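/*
 * Background note: F0h is the AMD "reset"/read-array command - a single
 * write of CMD(0xF0) anywhere in the chip aborts command mode and returns
 * the array to normal read mode.  The same write therefore appears in the
 * error paths of the program/erase routines and in cfi_amdstd_resume()
 * above.
 */
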
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");