/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
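
/*
 * The four device IDs above are JEDEC ids matched against cfi->id by the
 * fixup tables further down: the SST49LF parts get the firmware-hub lock
 * fixup from jedec_fixup_table, and the AT49BV6416 gets the Atmel
 * lock/unlock handlers from fixup_table.
 */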

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
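
/*
 * How the tables above are consumed: cfi_fixup() (implemented in
 * drivers/mtd/chips/cfi_util.c) walks a table and calls every entry whose
 * mfr/id pair matches the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting
 * as wildcards.  A minimal sketch of that loop, assuming the 2.6.36
 * cfi_util.c behaviour:
 *
 *	struct cfi_fixup *f;
 *	for (f = fixups; f->fixup; f++) {
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *	}
 */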

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
			 *      http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}

struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
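
/*
 * Usage sketch (illustrative only, not part of this driver): a board map
 * driver does not call cfi_cmdset_0002() directly.  It probes the chip
 * with do_map_probe(), which reads the CFI query table and dispatches
 * here when the primary vendor command set ID is 0x0002 (or via the
 * 0x0006/0x0701 aliases above).  "my_map" is a hypothetical struct
 * map_info, and error handling is omitted:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */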

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
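
/*
 * Worked example of the toggle-bit polling used by chip_ready() and
 * chip_good() (per the AMD command-set datasheets): while an embedded
 * program or erase algorithm runs, DQ6 flips on every read of the device,
 * so two back-to-back reads differ; once the operation completes, reads
 * return stable array data, so the two reads match (and, for chip_good(),
 * also match the datum just written).
 */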

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
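
/*
 * Note on the suspend handshake above: 0xB0 is the AMD Erase-Suspend
 * command, and 0x30 written to the in-progress sector doubles as
 * Erase-Resume.  That is why get_chip() and put_chip() write those
 * opcodes to chip->in_progress_block_addr whenever a read or write
 * interrupts a running erase.
 */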

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
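
/*
 * Worked example of the chip-splitting arithmetic above: with two
 * non-interleaved chips of 4 MiB each (cfi->chipshift == 22), a read of
 * 8 bytes at from == 0x3ffffe starts at chipnum 0, ofs 0x3ffffe;
 * (len + ofs - 1) >> chipshift is non-zero, so thislen is clamped to the
 * 2 bytes left in chip 0, and the loop continues with the remaining
 * 6 bytes at ofs 0 of chip 1.
 */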

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter the SecSi (security silicon) region: unlock cycles + 0x88 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit the SecSi region: unlock cycles + 0x90, 0x00 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
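
/*
 * The three cfi_send_gen_cmd() calls in do_write_oneword() implement the
 * standard AMD word-program sequence: 0xAA to addr_unlock1 (0x555 for a
 * single x16 chip), 0x55 to addr_unlock2 (0x2AA), 0xA0 to addr_unlock1,
 * and then the datum itself to the target address.  0xF0 is the reset
 * command used to bring the chip back to array mode on failure.
 */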

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
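
/*
 * Write-buffer protocol recap for the sequence above (the AMD/Spansion
 * "Write to Buffer" command): after the two unlock cycles, 0x25 at the
 * sector address starts the buffer load, the next write supplies the
 * word count minus one, the data words follow, and 0x29 at the same
 * sector address confirms and starts programming.  Note that a word
 * count of 0 therefore means "one word".
 */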

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
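
/*
 * Example of the buffer sizing above: wbufsize is
 * cfi_interleave(cfi) << MaxBufWriteSize, so a single chip reporting
 * MaxBufWriteSize == 5 has a 32-byte write buffer, and the loop trims
 * each chunk so it never crosses a 32-byte boundary (the chip cannot
 * program across its buffer boundary in one operation).
 */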

/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
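
/*
 * Erase command recap: both erase paths issue the six-cycle AMD sequence
 * AA/55/80/AA/55 at the unlock addresses; do_erase_chip() finishes it
 * with 0x10 (chip erase) at addr_unlock1, while do_erase_oneblock()
 * finishes with 0x30 written to the sector to be erased.  Success is
 * judged by chip_good() reading back all-0xFF.
 */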

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");