MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] / drivers / mtd / chips / cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB                0x00ad
#define I82802AC                0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080               0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                               size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                                  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
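
/*
 * Editor's note: fwh_lock.h supplies fixup_use_fwh_lock(), which swaps in
 * the FirmWare Hub style lock/unlock primitives for parts (such as the
 * Intel 82802AB/AC referenced in jedec_fixup_table below) that do not use
 * the standard CFI locking commands.
 */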
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
               "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}
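
/*
 * Editor's note: each fixup table below pairs a (manufacturer, device id)
 * match with one of the routines above; the generic cfi_fixup() helper
 * walks a table and invokes every entry whose ids match the probed chip,
 * with CFI_MFR_ANY and CFI_ID_ANY acting as wildcards.
 */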
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device ids are
         * common as well.  This table is for picking all the
         * cases where we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
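
/*
 * Editor's note: the aliases above let this one driver also serve command
 * sets 0x0003 and 0x0200.  Where the 0x0200 variant differs (e.g. the
 * 0x41/0xe9 forms of the word and buffer program commands used further
 * down), the code checks cfi->cfiq->P_ID at run time rather than needing
 * a separate cmdset module.
 */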
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                       "--> %d partitions of %d KiB\n",
                       map->name, cfi->numchips, cfi->interleave,
                       newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        } else {
                struct flchip *chip;
                int i;
                for (i = 0; i < cfi->numchips; i++) {
                        chip = &cfi->chips[i];
                        init_waitqueue_head(&chip->wq);
                        spin_lock_init(&chip->_spinlock);
                        chip->mutex = &chip->_spinlock;
                }
        }

        return 0;
}
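
/*
 * Editor's note, a worked example with hypothetical numbers: a single
 * non-interleaved 8 MiB chip (chipshift = 23) advertising 4 identical
 * hardware partitions gives partshift = 23 - __ffs(4) = 21, so the code
 * above re-presents it as numvirtchips = 4 virtual chips of 2 MiB each,
 * all pointing at one shared flchip_shared arbitration structure.
 */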
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here. */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}
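
/*
 * Editor's note: callers follow a fixed pattern around get_chip/put_chip
 * (see e.g. do_read_onechip below): take chip->mutex, call get_chip() with
 * the intended mode, issue the flash commands, then put_chip() and drop
 * the mutex.  get_chip() may itself drop and retake the mutex while
 * sleeping, so chip->state must be rechecked after it returns.
 */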
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
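
/*
 * Editor's note, a minimal sketch of the resulting discipline, following
 * the shape of do_write_oneword() below (illustrative only, not a
 * separate API):
 *
 *      xip_disable(map, chip, adr);        // flash leaves array mode, IRQs off
 *      map_write(map, CMD(0x40), adr);     // program setup
 *      map_write(map, datum, adr);         // data word
 *      ... poll status using __xipram helpers only ...
 *      xip_enable(map, chip, adr);         // back to array mode, IRQs on
 */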
static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec)  \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif
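
/*
 * Editor's note, illustrative arithmetic: with HZ=100, 1000000/HZ is
 * 10000 us, so in the non-XIP path above a typical buffer-write delay of,
 * say, 2000 us busy-waits in 1 us steps, while millisecond-scale erase
 * delays are slept away in msleep() chunks of half the expected
 * operation time.
 */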
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &cfi->chips[chipnum];
                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}
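
/*
 * Editor's note: point/unpoint hand callers a direct pointer into the
 * memory-mapped flash instead of a buffered copy; ref_point_counter makes
 * the pair nestable, and a chip only leaves FL_POINT (allowing writes and
 * erases again) once every outstanding point has been unpointed.
 */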
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum, int mode)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, write_cmd;
        int ret=0;

        adr += chip->start;

        switch (mode) {
        case FL_WRITING:
                write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
                break;
        case FL_OTP_WRITE:
                write_cmd = CMD(0xc0);
                break;
        default:
                return -EINVAL;
        }

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, mode);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, write_cmd, adr);
        map_write(map, datum, adr);
        chip->state = mode;

        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
                                   adr, map_bankwidth(map),
                                   chip->word_write_time);
        if (ret) {
                xip_enable(map, chip, adr);
                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
                goto out;
        }

        /* check for errors */
        status = map_read(map, adr);
        if (map_word_bitsset(map, status, CMD(0x1a))) {
                unsigned long chipstatus = MERGESTATUS(status);

                /* reset status */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);
                xip_enable(map, chip, adr);

                if (chipstatus & 0x02) {
                        ret = -EROFS;
                } else if (chipstatus & 0x08) {
                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
                        ret = -EIO;
                } else {
                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
                        ret = -EINVAL;
                }

                goto out;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}
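
/*
 * Editor's note: the CMD(0x1a) mask above gathers the Intel status-register
 * error bits: SR.1 (block locked, mapped to -EROFS), SR.3 (VPP low, -EIO)
 * and SR.4 (program failure, -EINVAL).  MERGESTATUS() folds the per-device
 * status of an interleaved bus down to one value before those tests.
 */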
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int gap = ofs - bus_ofs;
                int n;
                map_word datum;

                n = min_t(int, len, map_bankwidth(map)-gap);
                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, gap, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, datum, FL_WRITING);
                if (ret)
                        return ret;

                len -= n;
                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while(len >= map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum, FL_WRITING);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len & (map_bankwidth(map)-1)) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum, FL_WRITING);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}
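
/*
 * Editor's note, a worked example with hypothetical numbers: writing 10
 * bytes at offset 2 on a bankwidth-4 map first programs bytes 2-3 of the
 * word at offset 0, with the pad bytes preloaded to 0xff (NOR programming
 * can only clear bits, so 0xff filler leaves neighbouring data untouched),
 * then two aligned full words cover the remaining 8 bytes and no trailing
 * partial word is left.
 */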
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const struct kvec **pvec,
                                    unsigned long *pvec_seek, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, write_cmd, datum;
        unsigned long cmd_adr;
        int ret, wbufsize, word_gap, words;
        const struct kvec *vec;
        unsigned long vec_seek;

        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
        cmd_adr = adr & ~(wbufsize-1);

        /* Let's determine this according to the interleave only once */
        write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, cmd_adr);

        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
           [...], the device will not accept any more Write to Buffer commands".
           So we must check here and reset those bits if they're set. Otherwise
           we're just pissing in the wind */
        if (chip->state != FL_STATUS) {
                map_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;
        }
        status = map_read(map, cmd_adr);
        if (map_word_bitsset(map, status, CMD(0x30))) {
                xip_enable(map, chip, cmd_adr);
                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
                xip_disable(map, chip, cmd_adr);
                map_write(map, CMD(0x50), cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
        }

        chip->state = FL_WRITING_TO_BUFFER;
        map_write(map, write_cmd, cmd_adr);
        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
        if (ret) {
                /* Argh. Not ready for write to buffer */
                map_word Xstatus = map_read(map, cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;
                status = map_read(map, cmd_adr);
                map_write(map, CMD(0x50), cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
                xip_enable(map, chip, cmd_adr);
                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
                       map->name, Xstatus.x[0], status.x[0]);
                goto out;
        }

        /* Figure out the number of words to write */
        word_gap = (-adr & (map_bankwidth(map)-1));
        words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
        if (!word_gap) {
                words--;
        } else {
                word_gap = map_bankwidth(map) - word_gap;
                adr -= word_gap;
                datum = map_word_ff(map);
        }

        /* Write length of data to come */
        map_write(map, CMD(words), cmd_adr );

        /* Write data */
        vec = *pvec;
        vec_seek = *pvec_seek;
        do {
                int n = map_bankwidth(map) - word_gap;
                if (n > vec->iov_len - vec_seek)
                        n = vec->iov_len - vec_seek;
                if (n > len)
                        n = len;

                if (!word_gap && len < map_bankwidth(map))
                        datum = map_word_ff(map);

                datum = map_word_load_partial(map, datum,
                                              vec->iov_base + vec_seek,
                                              word_gap, n);

                len -= n;
                word_gap += n;
                if (!len || word_gap == map_bankwidth(map)) {
                        map_write(map, datum, adr);
                        adr += map_bankwidth(map);
                        word_gap = 0;
                }

                vec_seek += n;
                if (vec_seek == vec->iov_len) {
                        vec++;
                        vec_seek = 0;
                }
        } while (len);
        *pvec = vec;
        *pvec_seek = vec_seek;

        /* GO GO GO */
        map_write(map, CMD(0xd0), cmd_adr);
        chip->state = FL_WRITING;

        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
                                   adr, len,
                                   chip->buffer_write_time);
        if (ret) {
                map_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;
                xip_enable(map, chip, cmd_adr);
                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
                goto out;
        }

        /* check for errors */
        status = map_read(map, cmd_adr);
        if (map_word_bitsset(map, status, CMD(0x1a))) {
                unsigned long chipstatus = MERGESTATUS(status);

                /* reset status */
                map_write(map, CMD(0x50), cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
                xip_enable(map, chip, cmd_adr);

                if (chipstatus & 0x02) {
                        ret = -EROFS;
                } else if (chipstatus & 0x08) {
                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
                        ret = -EIO;
                } else {
                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
                        ret = -EINVAL;
                }

                goto out;
        }

        xip_enable(map, chip, cmd_adr);
 out:   put_chip(map, chip, cmd_adr);
        spin_unlock(chip->mutex);
        return ret;
}
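
/*
 * Editor's note, recapping the buffer command sequence used above:
 * 0xe8/0xe9 (write-to-buffer setup), a poll until the chip reports the
 * buffer available, the word count written as count-minus-one, the padded
 * data words, then 0xd0 to confirm and start programming; status is then
 * checked exactly as in the single-word path.
 */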
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
                                unsigned long count, loff_t to, size_t *retlen)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs, vec_seek, i;
        size_t len = 0;

        for (i = 0; i < count; i++)
                len += vecs[i].iov_len;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);
        vec_seek = 0;

        do {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;
                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, &vecs, &vec_seek, size);
                if (ret)
                        return ret;

                ofs += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }

                /* Be nice and reschedule with the chip in a usable state for other
                   processes. */
                cond_resched();

        } while (len);

        return 0;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
                                       size_t len, size_t *retlen, const u_char *buf)
{
        struct kvec vec;

        vec.iov_base = (void *) buf;
        vec.iov_len = len;

        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
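
/*
 * Editor's note: mtd->write thus funnels through the same path as
 * mtd->writev; a plain buffer is wrapped in a single-element kvec, so the
 * chunking against wbufsize boundaries and the per-chip advance live in
 * only one place.
 */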
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
                                      unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status;
        int retries = 3;
        int ret;

        adr += chip->start;

 retry:
        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_ERASING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        /* Clear the status register first */
        map_write(map, CMD(0x50), adr);

        /* Now erase */
        map_write(map, CMD(0x20), adr);
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;

        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
                                   adr, len,
                                   chip->erase_time);
        if (ret) {
                map_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;
                xip_enable(map, chip, adr);
                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
                goto out;
        }

        /* We've broken this before. It doesn't hurt to be safe */
        map_write(map, CMD(0x70), adr);
        chip->state = FL_STATUS;
        status = map_read(map, adr);

        /* check for errors */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
                unsigned long chipstatus = MERGESTATUS(status);

                /* Reset the error bits */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);
                xip_enable(map, chip, adr);

                if ((chipstatus & 0x30) == 0x30) {
                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
                        ret = -EINVAL;
                } else if (chipstatus & 0x02) {
                        /* Protection bit set */
                        ret = -EROFS;
                } else if (chipstatus & 0x8) {
                        /* Voltage */
                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
                        ret = -EIO;
                } else if (chipstatus & 0x20 && retries--) {
                        printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
                        put_chip(map, chip, adr);
                        spin_unlock(chip->mutex);
                        goto retry;
                } else {
                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
                        ret = -EIO;
                }

                goto out;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}

int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;

        ofs = instr->addr;
        len = instr->len;

        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_SYNCING);

                if (!ret) {
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                }
                spin_unlock(chip->mutex);
        }

        /* Unlock the chips again */

        for (i--; i >=0; i--) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        chip->oldstate = FL_READY;
                        wake_up(&chip->wq);
                }
                spin_unlock(chip->mutex);
        }
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
                                                struct flchip *chip,
                                                unsigned long adr,
                                                int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int status, ofs_factor = cfi->interleave * cfi->device_type;

        adr += chip->start;
        xip_disable(map, chip, adr+(2*ofs_factor));
        map_write(map, CMD(0x90), adr+(2*ofs_factor));
        chip->state = FL_JEDEC_QUERY;
        status = cfi_read_query(map, adr+(2*ofs_factor));
        xip_enable(map, chip, 0);
        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
               adr, status);
        return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
                                       unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int udelay;
        int ret;

        adr += chip->start;

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        map_write(map, CMD(0x60), adr);
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                map_write(map, CMD(0x01), adr);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                map_write(map, CMD(0xD0), adr);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        /*
         * If Instant Individual Block Locking supported then no need
         * to delay.
         */
        udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000 : 0;

        ret = WAIT_TIMEOUT(map, chip, adr, udelay);
        if (ret) {
                map_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;
                xip_enable(map, chip, adr);
                printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
                goto out;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
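
/*
 * One-time-programmable (OTP) protection register support, as described
 * by the Intel extended query table.  All OTP operations are funnelled
 * through cfi_intelext_otp_walk() with an otp_op_t callback.
 */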
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
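
/* Read protection register contents while the chip is in JEDEC query mode. */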
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
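
/*
 * Program OTP data one bus word at a time; map_word_load_partial() merges
 * unaligned head/tail bytes into an all-ones word so that only the
 * intended bits are programmed.
 */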
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
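
/*
 * Lock an OTP group by clearing its bit in the protection lock register.
 * The requested area must match the group boundaries exactly.
 */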
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
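
/*
 * Walk the OTP protection register fields of every physical chip.  For
 * each field, either emit otp_info records (action == NULL), skip ahead
 * to the requested offset, or apply the action callback to the part of
 * each group that intersects the request.
 */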
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is unsigned: check before
					   subtracting so a short buffer
					   cannot wrap around */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
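
/* Thin wrappers binding the OTP walker to the mtd_info methods. */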
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
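
/*
 * Power management: a chip can only be suspended from an idle state with
 * no operation pending; otherwise -EAGAIN is returned and any chips that
 * were already marked suspended are woken up again.
 */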
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
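			/* fall through */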
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
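
/* Put every suspended chip back into a known (array read) state. */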
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
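
/*
 * Used at reboot/shutdown: return all chips to array read mode so that a
 * bootloader stored in this flash stays readable after a soft reset.
 */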
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
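
/* Reboot notifier: reset the flash to array mode before restarting. */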
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
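
/* Tear-down: reset the chips and free everything the probe path allocated. */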
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");