/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
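
/*
 * Orientation note on the command protocol used throughout this driver
 * (all of the opcodes below appear literally in the code that follows):
 * commands are prefixed by two unlock cycles (0xAA to addr_unlock1, 0x55 to
 * addr_unlock2) and then select the operation, e.g. 0xA0 program word,
 * 0x25/0x29 write buffer load/confirm, 0x80 + 0x30 sector erase,
 * 0x80 + 0x10 chip erase, 0xB0/0x30 erase suspend/resume and 0xF0 reset
 * back to read array mode.
 */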
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
};
/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);

	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}
static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}
static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}
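
/*
 * Note: the original SST39VF parts handled above expect the full
 * 0x5555/0x2AAA unlock addresses, whereas the rev-B parts use the shorter
 * 0x555/0x2AA addresses that match the driver's defaults set later in
 * cfi_cmdset_0002().
 */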
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}
static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
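	{ 0, 0, NULL, NULL }
};

/*
 * cfi_fixup() (generic CFI support code) walks tables like the ones above
 * and invokes each fixup whose manufacturer and device ID match the probed
 * chip; CFI_MFR_ANY/CFI_ID_ANY act as wildcards and a zeroed entry
 * terminates the table.
 */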
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_amdstd_erase_varsize;
	mtd->write = cfi_amdstd_write_words;
	mtd->read = cfi_amdstd_read;
	mtd->sync = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume = cfi_amdstd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
			 *      http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
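
/*
 * cfi_cmdset_0002() above is invoked by the generic CFI/JEDEC probe once a
 * chip advertising primary vendor command set 0x0002 (or the aliased 0x0006
 * and 0x0701 IDs declared just below) has been identified: it fills in the
 * mtd_info callbacks, applies the fixup tables and then hands over to
 * cfi_amdstd_setup() to build the erase-region geometry.
 */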
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling or bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
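
/*
 * Both helpers above rely on the AMD "toggle bit" status scheme: while an
 * embedded program/erase algorithm runs, successive reads of the same
 * address return a toggling DQ6, so two back-to-back reads only match once
 * the operation has completed.  The write/erase paths below poll them in a
 * loop of roughly this shape (simplified sketch of do_write_oneword()):
 *
 *	for (;;) {
 *		if (chip_ready(map, adr))
 *			break;
 *		if (time_after(jiffies, timeo)) {
 *			... report a software timeout and bail out ...
 *		}
 *		UDELAY(map, chip, adr, 1);
 *	}
 */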
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
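
/*
 * get_chip()/put_chip() bracket every flash access in this driver: get_chip()
 * waits (or suspends a running erase with 0xB0) until the chip can service
 * the requested mode, and put_chip() undoes that, resuming a suspended erase
 * with 0x30 and waking any sleepers on chip->wq.
 */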
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			mutex_unlock(&chip->mutex);

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far too
	 * short depending on the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
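
/*
 * Summary of the single-word programming path above: unlock (0xAA, 0x55),
 * command 0xA0, write the datum, then poll with chip_ready()/chip_good()
 * until DQ6 stops toggling; on failure the chip is reset with 0xF0 and the
 * word is retried up to MAX_WORD_RETRIES times.
 */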
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
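
/*
 * Buffered programming protocol used above: after the two unlock cycles the
 * driver writes 0x25 (Write Buffer Load) and a word count of (words - 1) to
 * the sector, streams the data words, then issues 0x29 (Write Buffer Program
 * Confirm) and polls for completion just like the single-word path.
 */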
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
/*
 * Handle devices with one erase region that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
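
/*
 * Chip erase is the six-cycle sequence above: AA/55 unlock, 0x80 (erase
 * setup), a second AA/55 unlock, then 0x10 = erase entire chip.  Success is
 * verified by chip_good() reading back all-ones (map_word_ff).
 */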
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
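
/*
 * Sector erase differs from chip erase only in the final cycle: after the
 * 0x80 erase setup and second unlock, 0x30 is written to the address of the
 * sector itself, which is also recorded in chip->in_progress_block_addr so
 * that a later erase-suspend/resume (0xB0/0x30) targets the right block.
 */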
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}
/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");