allow coexistence of N build and AC build.
release/src-rt-6.x/linux/linux-2.6/drivers/mtd/chips/cfi_cmdset_0001.c (tomato.git)
1 /*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
18 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
19 * - auto unlock sectors on resume for auto locking flash on power up
20 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
45 // debugging, turns off buffer write mode if set to 1
46 #define FORCE_WORD_WRITE 0
48 #define MANUFACTURER_INTEL 0x0089
49 #define I82802AB 0x00ad
50 #define I82802AC 0x00ac
51 #define MANUFACTURER_ST 0x0020
52 #define M50LPW080 0x002F
54 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
55 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
56 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
58 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
59 static void cfi_intelext_sync (struct mtd_info *);
60 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
61 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
62 #ifdef CONFIG_MTD_OTP
63 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
67 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
68 struct otp_info *, size_t);
69 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
70 struct otp_info *, size_t);
71 #endif
72 static int cfi_intelext_suspend (struct mtd_info *);
73 static void cfi_intelext_resume (struct mtd_info *);
74 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
76 static void cfi_intelext_destroy(struct mtd_info *);
78 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
80 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
81 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
83 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
84 size_t *retlen, u_char **mtdbuf);
85 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
86 size_t len);
88 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
89 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
90 #include "fwh_lock.h"
94 /*
95 * *********** SETUP AND PROBE BITS ***********
96 */
98 static struct mtd_chip_driver cfi_intelext_chipdrv = {
99 .probe = NULL, /* Not usable directly */
100 .destroy = cfi_intelext_destroy,
101 .name = "cfi_cmdset_0001",
102 .module = THIS_MODULE
105 /* #define DEBUG_LOCK_BITS */
106 /* #define DEBUG_CFI_FEATURES */
108 #ifdef DEBUG_CFI_FEATURES
109 static void cfi_tell_features(struct cfi_pri_intelext *extp)
111 int i;
112 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
113 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
114 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
115 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
116 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
117 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
118 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
119 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
120 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
121 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
122 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
123 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
124 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
125 for (i=11; i<32; i++) {
126 if (extp->FeatureSupport & (1<<i))
127 printk(" - Unknown Bit %X: supported\n", i);
130 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
131 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
132 for (i=1; i<8; i++) {
133 if (extp->SuspendCmdSupport & (1<<i))
134 printk(" - Unknown Bit %X: supported\n", i);
137 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
138 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
139 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
140 for (i=2; i<3; i++) {
141 if (extp->BlkStatusRegMask & (1<<i))
142 printk(" - Unknown Bit %X Active: yes\n",i);
144 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
145 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
146 for (i=6; i<16; i++) {
147 if (extp->BlkStatusRegMask & (1<<i))
148 printk(" - Unknown Bit %X Active: yes\n",i);
151 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
152 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
153 if (extp->VppOptimal)
154 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
155 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
157 #endif
159 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
160 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
161 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
163 struct map_info *map = mtd->priv;
164 struct cfi_private *cfi = map->fldrv_priv;
165 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
168 "erase on write disabled.\n");
169 extp->SuspendCmdSupport &= ~1;
171 #endif
173 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
174 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
176 struct map_info *map = mtd->priv;
177 struct cfi_private *cfi = map->fldrv_priv;
178 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
180 if (cfip && (cfip->FeatureSupport&4)) {
181 cfip->FeatureSupport &= ~4;
182 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
185 #endif
187 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
189 struct map_info *map = mtd->priv;
190 struct cfi_private *cfi = map->fldrv_priv;
192 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
193 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
196 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
198 struct map_info *map = mtd->priv;
199 struct cfi_private *cfi = map->fldrv_priv;
201 /* Note this is done after the region info is endian swapped */
202 cfi->cfiq->EraseRegionInfo[1] =
203 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
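/*
 * Editor's note (worked example): each EraseRegionInfo word packs
 * "number of blocks - 1" in its low 16 bits and "block size / 256" in
 * its high 16 bits (see the decoding in cfi_intelext_setup() below).
 * The fixup above keeps the size field and rewrites only the count:
 * (info & 0xffff0000) | 0x3e means 0x3e + 1 = 63 blocks in region 1.
 */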
206 static void fixup_use_point(struct mtd_info *mtd, void *param)
208 struct map_info *map = mtd->priv;
209 if (!mtd->point && map_is_linear(map)) {
210 mtd->point = cfi_intelext_point;
211 mtd->unpoint = cfi_intelext_unpoint;
215 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
217 struct map_info *map = mtd->priv;
218 struct cfi_private *cfi = map->fldrv_priv;
219 if (cfi->cfiq->BufWriteTimeoutTyp) {
220 printk(KERN_INFO "Using buffer write method\n" );
221 mtd->write = cfi_intelext_write_buffers;
222 mtd->writev = cfi_intelext_writev;
226 /*
227 * Some chips power up with all sectors locked by default.
228 */
229 static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
231 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
232 mtd->flags |= MTD_STUPID_LOCK;
235 static struct cfi_fixup cfi_fixup_table[] = {
236 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
237 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
238 #endif
239 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
240 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
241 #endif
242 #if !FORCE_WORD_WRITE
243 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
244 #endif
245 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
246 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
247 { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
248 { 0, 0, NULL, NULL }
251 static struct cfi_fixup jedec_fixup_table[] = {
252 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
253 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
254 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
255 { 0, 0, NULL, NULL }
257 static struct cfi_fixup fixup_table[] = {
258 /* The CFI vendor IDs and the JEDEC vendor IDs appear
259 * to be common. It looks like the device IDs are as
260 * well. This table picks up all cases where we know
261 * that is the case.
262 */
263 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
264 { 0, 0, NULL, NULL }
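/*
 * For reference, the consumer of these tables is cfi_fixup() in
 * cfi_util.c. A minimal editorial sketch of its matching loop (not
 * part of this file): entries run until the NULL sentinel, and
 * CFI_MFR_ANY / CFI_ID_ANY act as wildcards.
 *
 *	struct cfi_fixup *f;
 *	for (f = fixups; f->fixup; f++) {
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id == CFI_ID_ANY || f->id == cfi->id))
 *			f->fixup(mtd, f->param);
 *	}
 */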
267 static inline struct cfi_pri_intelext *
268 read_pri_intelext(struct map_info *map, __u16 adr)
270 struct cfi_pri_intelext *extp;
271 unsigned int extp_size = sizeof(*extp);
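 /*
  * Two-pass sizing (descriptive note): the extended query table has a
  * variable-sized tail (OTP fields, burst read info, partition
  * regions). Read the fixed-size header first; if parsing finds the
  * buffer too small, "need_more" enlarges extp_size and jumps back
  * here to re-read the whole table.
  */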
273 again:
274 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
275 if (!extp)
276 return NULL;
278 if (extp->MajorVersion != '1' ||
279 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
280 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
281 "version %c.%c.\n", extp->MajorVersion,
282 extp->MinorVersion);
283 kfree(extp);
284 return NULL;
287 /* Do some byteswapping if necessary */
288 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
289 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
290 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
292 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
293 unsigned int extra_size = 0;
294 int nb_parts, i;
296 /* Protection Register info */
297 extra_size += (extp->NumProtectionFields - 1) *
298 sizeof(struct cfi_intelext_otpinfo);
300 /* Burst Read info */
301 extra_size += 2;
302 if (extp_size < sizeof(*extp) + extra_size)
303 goto need_more;
304 extra_size += extp->extra[extra_size-1];
306 /* Number of hardware-partitions */
307 extra_size += 1;
308 if (extp_size < sizeof(*extp) + extra_size)
309 goto need_more;
310 nb_parts = extp->extra[extra_size - 1];
312 /* skip the sizeof(partregion) field in CFI 1.4 */
313 if (extp->MinorVersion >= '4')
314 extra_size += 2;
316 for (i = 0; i < nb_parts; i++) {
317 struct cfi_intelext_regioninfo *rinfo;
318 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
319 extra_size += sizeof(*rinfo);
320 if (extp_size < sizeof(*extp) + extra_size)
321 goto need_more;
322 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
323 extra_size += (rinfo->NumBlockTypes - 1)
324 * sizeof(struct cfi_intelext_blockinfo);
327 if (extp->MinorVersion >= '4')
328 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
330 if (extp_size < sizeof(*extp) + extra_size) {
331 need_more:
332 extp_size = sizeof(*extp) + extra_size;
333 kfree(extp);
334 if (extp_size > 4096) {
335 printk(KERN_ERR
336 "%s: cfi_pri_intelext is too fat\n",
337 __FUNCTION__);
338 return NULL;
340 goto again;
344 return extp;
347 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
349 struct cfi_private *cfi = map->fldrv_priv;
350 struct mtd_info *mtd;
351 int i;
353 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
354 if (!mtd) {
355 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
356 return NULL;
358 mtd->priv = map;
359 mtd->type = MTD_NORFLASH;
361 /* Fill in the default mtd operations */
362 mtd->erase = cfi_intelext_erase_varsize;
363 mtd->read = cfi_intelext_read;
364 mtd->write = cfi_intelext_write_words;
365 mtd->sync = cfi_intelext_sync;
366 mtd->lock = cfi_intelext_lock;
367 mtd->unlock = cfi_intelext_unlock;
368 mtd->suspend = cfi_intelext_suspend;
369 mtd->resume = cfi_intelext_resume;
370 mtd->flags = MTD_CAP_NORFLASH;
371 mtd->name = map->name;
372 mtd->writesize = 1;
374 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
376 if (cfi->cfi_mode == CFI_MODE_CFI) {
377 /*
378 * It's a real CFI chip, not one for which the probe
379 * routine faked a CFI structure. So we read the feature
380 * table from it.
381 */
382 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
383 struct cfi_pri_intelext *extp;
385 extp = read_pri_intelext(map, adr);
386 if (!extp) {
387 kfree(mtd);
388 return NULL;
391 /* Install our own private info structure */
392 cfi->cmdset_priv = extp;
394 cfi_fixup(mtd, cfi_fixup_table);
396 #ifdef DEBUG_CFI_FEATURES
397 /* Tell the user about it in lots of lovely detail */
398 cfi_tell_features(extp);
399 #endif
401 if(extp->SuspendCmdSupport & 1) {
402 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
405 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
406 /* Apply jedec specific fixups */
407 cfi_fixup(mtd, jedec_fixup_table);
409 /* Apply generic fixups */
410 cfi_fixup(mtd, fixup_table);
412 for (i=0; i< cfi->numchips; i++) {
413 if (cfi->cfiq->WordWriteTimeoutTyp)
414 cfi->chips[i].word_write_time =
415 1<<cfi->cfiq->WordWriteTimeoutTyp;
416 else
417 cfi->chips[i].word_write_time = 50000;
419 if (cfi->cfiq->BufWriteTimeoutTyp)
420 cfi->chips[i].buffer_write_time =
421 1<<cfi->cfiq->BufWriteTimeoutTyp;
422 /* No default; if it isn't specified, we won't use it */
424 if (cfi->cfiq->BlockEraseTimeoutTyp)
425 cfi->chips[i].erase_time =
426 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
427 else
428 cfi->chips[i].erase_time = 2000000;
430 cfi->chips[i].ref_point_counter = 0;
431 init_waitqueue_head(&(cfi->chips[i].wq));
434 map->fldrv = &cfi_intelext_chipdrv;
436 return cfi_intelext_setup(mtd);
438 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
439 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
440 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
441 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
442 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
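/*
 * Editor's note: the alias attribute above emits a single function
 * body reachable under three symbol names, so probes for primary
 * command-set IDs 0x0003 and 0x0200 are serviced by cfi_cmdset_0001()
 * as well.
 */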
444 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
446 struct map_info *map = mtd->priv;
447 struct cfi_private *cfi = map->fldrv_priv;
448 unsigned long offset = 0;
449 int i,j;
450 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
452 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
454 mtd->size = devsize * cfi->numchips;
456 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
457 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
458 * mtd->numeraseregions, GFP_KERNEL);
459 if (!mtd->eraseregions) {
460 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
461 goto setup_err;
464 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
465 unsigned long ernum, ersize;
466 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
467 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
469 if (mtd->erasesize < ersize) {
470 mtd->erasesize = ersize;
472 for (j=0; j<cfi->numchips; j++) {
473 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
474 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
475 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
476 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
478 offset += (ersize * ernum);
481 if (offset != devsize) {
482 /* Argh */
483 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
484 goto setup_err;
487 for (i=0; i<mtd->numeraseregions;i++){
488 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
489 i,mtd->eraseregions[i].offset,
490 mtd->eraseregions[i].erasesize,
491 mtd->eraseregions[i].numblocks);
494 #ifdef CONFIG_MTD_OTP
495 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
496 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
497 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
498 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
499 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
500 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
501 #endif
503 /* This function has the potential to distort reality
504 a bit and therefore should be called last. */
505 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
506 goto setup_err;
508 __module_get(THIS_MODULE);
509 register_reboot_notifier(&mtd->reboot_notifier);
510 return mtd;
512 setup_err:
513 if(mtd) {
514 kfree(mtd->eraseregions);
515 kfree(mtd);
517 kfree(cfi->cmdset_priv);
518 return NULL;
521 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
522 struct cfi_private **pcfi)
524 struct map_info *map = mtd->priv;
525 struct cfi_private *cfi = *pcfi;
526 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
528 /*
529 * Probing of multi-partition flash chips.
531 * To support multiple partitions when available, we simply arrange
532 * for each of them to have their own flchip structure even if they
533 * are on the same physical chip. This means completely recreating
534 * a new cfi_private structure right here which is a blatant code
535 * layering violation, but this is still the least intrusive
536 * arrangement at this point. This can be rearranged in the future
537 * if someone feels motivated enough. --nico
538 */
539 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
540 && extp->FeatureSupport & (1 << 9)) {
541 struct cfi_private *newcfi;
542 struct flchip *chip;
543 struct flchip_shared *shared;
544 int offs, numregions, numparts, partshift, numvirtchips, i, j;
546 /* Protection Register info */
547 offs = (extp->NumProtectionFields - 1) *
548 sizeof(struct cfi_intelext_otpinfo);
550 /* Burst Read info */
551 offs += extp->extra[offs+1]+2;
553 /* Number of partition regions */
554 numregions = extp->extra[offs];
555 offs += 1;
557 /* skip the sizeof(partregion) field in CFI 1.4 */
558 if (extp->MinorVersion >= '4')
559 offs += 2;
561 /* Number of hardware partitions */
562 numparts = 0;
563 for (i = 0; i < numregions; i++) {
564 struct cfi_intelext_regioninfo *rinfo;
565 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
566 numparts += rinfo->NumIdentPartitions;
567 offs += sizeof(*rinfo)
568 + (rinfo->NumBlockTypes - 1) *
569 sizeof(struct cfi_intelext_blockinfo);
572 /* Programming Region info */
573 if (extp->MinorVersion >= '4') {
574 struct cfi_intelext_programming_regioninfo *prinfo;
575 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
576 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
577 mtd->flags &= ~MTD_BIT_WRITEABLE;
578 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
579 map->name, mtd->writesize,
580 cfi->interleave * prinfo->ControlValid,
581 cfi->interleave * prinfo->ControlInvalid);
584 /*
585 * All functions below currently rely on all chips having
586 * the same geometry so we'll just assume that all hardware
587 * partitions are of the same size too.
588 */
589 partshift = cfi->chipshift - __ffs(numparts);
591 if ((1 << partshift) < mtd->erasesize) {
592 printk( KERN_ERR
593 "%s: bad number of hw partitions (%d)\n",
594 __FUNCTION__, numparts);
595 return -EINVAL;
598 numvirtchips = cfi->numchips * numparts;
599 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
600 if (!newcfi)
601 return -ENOMEM;
602 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
603 if (!shared) {
604 kfree(newcfi);
605 return -ENOMEM;
607 memcpy(newcfi, cfi, sizeof(struct cfi_private));
608 newcfi->numchips = numvirtchips;
609 newcfi->chipshift = partshift;
611 chip = &newcfi->chips[0];
612 for (i = 0; i < cfi->numchips; i++) {
613 shared[i].writing = shared[i].erasing = NULL;
614 spin_lock_init(&shared[i].lock);
615 for (j = 0; j < numparts; j++) {
616 *chip = cfi->chips[i];
617 chip->start += j << partshift;
618 chip->priv = &shared[i];
619 /* those should be reset too since
620 they create memory references. */
621 init_waitqueue_head(&chip->wq);
622 spin_lock_init(&chip->_spinlock);
623 chip->mutex = &chip->_spinlock;
624 chip++;
628 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
629 "--> %d partitions of %d KiB\n",
630 map->name, cfi->numchips, cfi->interleave,
631 newcfi->numchips, 1<<(newcfi->chipshift-10));
633 map->fldrv_priv = newcfi;
634 *pcfi = newcfi;
635 kfree(cfi);
638 return 0;
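/*
 * Worked example (editorial): a 16 MiB chip has cfi->chipshift == 24;
 * with numparts == 4 hardware partitions, partshift = 24 - __ffs(4) =
 * 22, so each virtual chip spans 1 << 22 = 4 MiB and
 * numvirtchips = cfi->numchips * 4 flchip structures are set up.
 */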
641 /*
642 * *********** CHIP ACCESS FUNCTIONS ***********
643 */
645 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
647 DECLARE_WAITQUEUE(wait, current);
648 struct cfi_private *cfi = map->fldrv_priv;
649 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
650 unsigned long timeo;
651 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
653 resettime:
654 timeo = jiffies + HZ;
655 retry:
656 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
657 /*
658 * OK. We have a possibility of contention on the write/erase
659 * operations which are global to the real chip and not per
660 * partition. So let's fight it over in the partition which
661 * currently has authority on the operation.
663 * The rules are as follows:
665 * - any write operation must own shared->writing.
667 * - any erase operation must own _both_ shared->writing and
668 * shared->erasing.
670 * - contention arbitration is handled in the owner's context.
672 * The 'shared' struct can be read and/or written only when
673 * its lock is taken.
674 */
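/*
 * Worked scenario (editorial): partition A owns both shared->writing
 * and shared->erasing for an erase when partition B calls get_chip()
 * to write. B finds contender == A, takes A's mutex, and a recursive
 * get_chip() on A suspends A's erase; B then becomes shared->writing.
 * In B's put_chip(), shared->writing is handed back to shared->erasing
 * (i.e. A), and the nested put_chip() on A resumes the erase.
 */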
675 struct flchip_shared *shared = chip->priv;
676 struct flchip *contender;
677 spin_lock(&shared->lock);
678 contender = shared->writing;
679 if (contender && contender != chip) {
680 /*
681 * The engine to perform the desired operation on this
682 * partition is already in use by someone else.
683 * Let's fight over it in the context of the chip
684 * currently using it. If it is possible to suspend,
685 * that other partition will do just that, otherwise
686 * it'll happily send us to sleep. In any case, when
687 * get_chip returns success we're clear to go ahead.
688 */
689 int ret = spin_trylock(contender->mutex);
690 spin_unlock(&shared->lock);
691 if (!ret)
692 goto retry;
693 spin_unlock(chip->mutex);
694 ret = get_chip(map, contender, contender->start, mode);
695 spin_lock(chip->mutex);
696 if (ret) {
697 spin_unlock(contender->mutex);
698 return ret;
700 timeo = jiffies + HZ;
701 spin_lock(&shared->lock);
702 spin_unlock(contender->mutex);
705 /* We now own it */
706 shared->writing = chip;
707 if (mode == FL_ERASING)
708 shared->erasing = chip;
709 spin_unlock(&shared->lock);
712 switch (chip->state) {
714 case FL_STATUS:
715 for (;;) {
716 status = map_read(map, adr);
717 if (map_word_andequal(map, status, status_OK, status_OK))
718 break;
720 /* At this point we're fine with write operations
721 in other partitions as they don't conflict. */
722 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
723 break;
725 if (time_after(jiffies, timeo)) {
726 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
727 map->name, status.x[0]);
728 return -EIO;
730 spin_unlock(chip->mutex);
731 cfi_udelay(1);
732 spin_lock(chip->mutex);
733 /* Someone else might have been playing with it. */
734 goto retry;
737 case FL_READY:
738 case FL_CFI_QUERY:
739 case FL_JEDEC_QUERY:
740 return 0;
742 case FL_ERASING:
743 if (!cfip ||
744 !(cfip->FeatureSupport & 2) ||
745 !(mode == FL_READY || mode == FL_POINT ||
746 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
747 goto sleep;
750 /* Erase suspend */
751 map_write(map, CMD(0xB0), adr);
753 /* If the flash has finished erasing, then 'erase suspend'
754 * appears to make some (28F320) flash devices switch to
755 * 'read' mode. Make sure that we switch to 'read status'
756 * mode so we get the right data. --rmk
757 */
758 map_write(map, CMD(0x70), adr);
759 chip->oldstate = FL_ERASING;
760 chip->state = FL_ERASE_SUSPENDING;
761 chip->erase_suspended = 1;
762 for (;;) {
763 status = map_read(map, adr);
764 if (map_word_andequal(map, status, status_OK, status_OK))
765 break;
767 if (time_after(jiffies, timeo)) {
768 /* Urgh. Resume and pretend we weren't here.
769 * Make sure we're in 'read status' mode if it had finished */
770 put_chip(map, chip, adr);
771 printk(KERN_ERR "%s: Chip not ready after erase "
772 "suspended: status = 0x%lx\n", map->name, status.x[0]);
773 return -EIO;
776 spin_unlock(chip->mutex);
777 cfi_udelay(1);
778 spin_lock(chip->mutex);
779 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
780 So we can just loop here. */
782 chip->state = FL_STATUS;
783 return 0;
785 case FL_XIP_WHILE_ERASING:
786 if (mode != FL_READY && mode != FL_POINT &&
787 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
788 goto sleep;
789 chip->oldstate = chip->state;
790 chip->state = FL_READY;
791 return 0;
793 case FL_POINT:
794 /* Only if there's no operation suspended... */
795 if (mode == FL_READY && chip->oldstate == FL_READY)
796 return 0;
798 default:
799 sleep:
800 set_current_state(TASK_UNINTERRUPTIBLE);
801 add_wait_queue(&chip->wq, &wait);
802 spin_unlock(chip->mutex);
803 schedule();
804 remove_wait_queue(&chip->wq, &wait);
805 spin_lock(chip->mutex);
806 goto resettime;
810 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
812 struct cfi_private *cfi = map->fldrv_priv;
814 if (chip->priv) {
815 struct flchip_shared *shared = chip->priv;
816 spin_lock(&shared->lock);
817 if (shared->writing == chip && chip->oldstate == FL_READY) {
818 /* We own the ability to write, but we're done */
819 shared->writing = shared->erasing;
820 if (shared->writing && shared->writing != chip) {
821 /* give back ownership to who we loaned it from */
822 struct flchip *loaner = shared->writing;
823 spin_lock(loaner->mutex);
824 spin_unlock(&shared->lock);
825 spin_unlock(chip->mutex);
826 put_chip(map, loaner, loaner->start);
827 spin_lock(chip->mutex);
828 spin_unlock(loaner->mutex);
829 wake_up(&chip->wq);
830 return;
832 shared->erasing = NULL;
833 shared->writing = NULL;
834 } else if (shared->erasing == chip && shared->writing != chip) {
835 /*
836 * We own the ability to erase without the ability
837 * to write, which means the erase was suspended
838 * and some other partition is currently writing.
839 * Don't let the switch below mess things up since
840 * we don't have ownership to resume anything.
841 */
842 spin_unlock(&shared->lock);
843 wake_up(&chip->wq);
844 return;
846 spin_unlock(&shared->lock);
849 switch(chip->oldstate) {
850 case FL_ERASING:
851 /* What if one interleaved chip has finished and the
852 other hasn't? The old code would leave the finished
853 one in READY mode. That's bad, and caused -EROFS
854 errors to be returned from do_erase_oneblock because
855 that's the only bit it checked for at the time.
856 As the state machine appears to explicitly allow
857 sending the 0x70 (Read Status) command to an erasing
858 chip and expecting it to be ignored, that's what we
859 do. */
860 map_write(map, CMD(0xd0), adr);
861 map_write(map, CMD(0x70), adr);
862 chip->oldstate = FL_READY;
863 chip->state = FL_ERASING;
864 break;
866 case FL_XIP_WHILE_ERASING:
867 chip->state = chip->oldstate;
868 chip->oldstate = FL_READY;
869 break;
871 case FL_READY:
872 case FL_STATUS:
873 case FL_JEDEC_QUERY:
874 /* We should really make set_vpp() count, rather than doing this */
875 DISABLE_VPP(map);
876 break;
877 default:
878 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
880 wake_up(&chip->wq);
883 #ifdef CONFIG_MTD_XIP
885 /*
886 * No interrupt whatsoever can be serviced while the flash isn't in array
887 * mode. This is ensured by the xip_disable() and xip_enable() functions
888 * enclosing any code path where the flash is known not to be in array mode.
889 * And within an XIP-disabled code path, only functions marked with __xipram
890 * may be called and nothing else (it's a good thing to inspect generated
891 * assembly to make sure inline functions were actually inlined and that gcc
892 * didn't emit calls to its own support functions). Configuring MTD CFI
893 * support for a single buswidth and a single interleave is also recommended.
894 */
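/*
 * Editorial sketch (condensed from do_write_oneword() below) of the
 * canonical bracketing; not a drop-in sequence:
 *
 *	xip_disable(map, chip, adr);	   // leave array mode, mask IRQs
 *	map_write(map, CMD(0x40), adr);	   // word-program setup
 *	map_write(map, datum, adr);
 *	ret = INVAL_CACHE_AND_WAIT(...);   // poll status via __xipram code
 *	xip_enable(map, chip, adr);	   // 0xff: back to array mode, IRQs on
 */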
896 static void xip_disable(struct map_info *map, struct flchip *chip,
897 unsigned long adr)
899 /* TODO: chips with no XIP use should ignore and return */
900 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
901 local_irq_disable();
904 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
905 unsigned long adr)
907 struct cfi_private *cfi = map->fldrv_priv;
908 if (chip->state != FL_POINT && chip->state != FL_READY) {
909 map_write(map, CMD(0xff), adr);
910 chip->state = FL_READY;
912 (void) map_read(map, adr);
913 xip_iprefetch();
914 local_irq_enable();
917 /*
918 * When a delay is required for the flash operation to complete, the
919 * xip_wait_for_operation() function polls for both the given timeout
920 * and pending (but still masked) hardware interrupts. Whenever there is an
921 * interrupt pending then the flash erase or write operation is suspended,
922 * array mode restored and interrupts unmasked. Task scheduling might also
923 * happen at that point. The CPU eventually returns from the interrupt or
924 * the call to schedule() and the suspended flash operation is resumed for
925 * the remainder of the delay period.
927 * Warning: this function _will_ fool interrupt latency tracing tools.
928 */
930 static int __xipram xip_wait_for_operation(
931 struct map_info *map, struct flchip *chip,
932 unsigned long adr, int *chip_op_time )
934 struct cfi_private *cfi = map->fldrv_priv;
935 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
936 map_word status, OK = CMD(0x80);
937 unsigned long usec, suspended, start, done;
938 flstate_t oldstate, newstate;
940 start = xip_currtime();
941 usec = *chip_op_time * 8;
942 if (usec == 0)
943 usec = 500000;
944 done = 0;
946 do {
947 cpu_relax();
948 if (xip_irqpending() && cfip &&
949 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
950 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
951 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
952 /*
953 * Let's suspend the erase or write operation when
954 * supported. Note that we currently don't try to
955 * suspend interleaved chips if there is already
956 * another operation suspended (imagine what happens
957 * when one chip was already done with the current
958 * operation while another chip suspended it, then
959 * we resume the whole thing at once). Yes, it
960 * can happen!
961 */
962 usec -= done;
963 map_write(map, CMD(0xb0), adr);
964 map_write(map, CMD(0x70), adr);
965 suspended = xip_currtime();
966 do {
967 if (xip_elapsed_since(suspended) > 100000) {
968 /*
969 * The chip doesn't want to suspend
970 * after waiting for 100 msecs.
971 * This is a critical error but there
972 * is not much we can do here.
973 */
974 return -EIO;
976 status = map_read(map, adr);
977 } while (!map_word_andequal(map, status, OK, OK));
979 /* Suspend succeeded */
980 oldstate = chip->state;
981 if (oldstate == FL_ERASING) {
982 if (!map_word_bitsset(map, status, CMD(0x40)))
983 break;
984 newstate = FL_XIP_WHILE_ERASING;
985 chip->erase_suspended = 1;
986 } else {
987 if (!map_word_bitsset(map, status, CMD(0x04)))
988 break;
989 newstate = FL_XIP_WHILE_WRITING;
990 chip->write_suspended = 1;
992 chip->state = newstate;
993 map_write(map, CMD(0xff), adr);
994 (void) map_read(map, adr);
995 asm volatile (".rep 8; nop; .endr");
996 local_irq_enable();
997 spin_unlock(chip->mutex);
998 asm volatile (".rep 8; nop; .endr");
999 cond_resched();
1001 /*
1002 * We're back. However someone else might have
1003 * decided to go write to the chip if we are in
1004 * a suspended erase state. If so let's wait
1005 * until it's done.
1006 */
1007 spin_lock(chip->mutex);
1008 while (chip->state != newstate) {
1009 DECLARE_WAITQUEUE(wait, current);
1010 set_current_state(TASK_UNINTERRUPTIBLE);
1011 add_wait_queue(&chip->wq, &wait);
1012 spin_unlock(chip->mutex);
1013 schedule();
1014 remove_wait_queue(&chip->wq, &wait);
1015 spin_lock(chip->mutex);
1017 /* Disallow XIP again */
1018 local_irq_disable();
1020 /* Resume the write or erase operation */
1021 map_write(map, CMD(0xd0), adr);
1022 map_write(map, CMD(0x70), adr);
1023 chip->state = oldstate;
1024 start = xip_currtime();
1025 } else if (usec >= 1000000/HZ) {
1026 /*
1027 * Try to save on CPU power when the waiting delay
1028 * is at least a system timer tick period.
1029 * No need to be extremely accurate here.
1030 */
1031 xip_cpu_idle();
1033 status = map_read(map, adr);
1034 done = xip_elapsed_since(start);
1035 } while (!map_word_andequal(map, status, OK, OK)
1036 && done < usec);
1038 return (done >= usec) ? -ETIME : 0;
1041 /*
1042 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1043 * the flash is actively programming or erasing since we have to poll for
1044 * the operation to complete anyway. We can't do that in a generic way with
1045 * a XIP setup so do it before the actual flash operation in this case
1046 * and stub it out from INVAL_CACHE_AND_WAIT.
1047 */
1048 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1049 INVALIDATE_CACHED_RANGE(map, from, size)
1051 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
1052 xip_wait_for_operation(map, chip, cmd_adr, p_usec)
1054 #else
1056 #define xip_disable(map, chip, adr)
1057 #define xip_enable(map, chip, adr)
1058 #define XIP_INVAL_CACHED_RANGE(x...)
1059 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1061 static int inval_cache_and_wait_for_operation(
1062 struct map_info *map, struct flchip *chip,
1063 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1064 int *chip_op_time )
1066 struct cfi_private *cfi = map->fldrv_priv;
1067 map_word status, status_OK = CMD(0x80);
1068 int z, chip_state = chip->state;
1069 unsigned long timeo;
1071 spin_unlock(chip->mutex);
1072 if (inval_len)
1073 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1074 if (*chip_op_time)
1075 cfi_udelay(*chip_op_time);
1076 spin_lock(chip->mutex);
1078 timeo = *chip_op_time * 8 * HZ / 1000000;
1079 if (timeo < HZ/2)
1080 timeo = HZ/2;
1081 timeo += jiffies;
1083 z = 0;
1084 for (;;) {
1085 if (chip->state != chip_state) {
1086 /* Someone's suspended the operation: sleep */
1087 DECLARE_WAITQUEUE(wait, current);
1089 set_current_state(TASK_UNINTERRUPTIBLE);
1090 add_wait_queue(&chip->wq, &wait);
1091 spin_unlock(chip->mutex);
1092 schedule();
1093 remove_wait_queue(&chip->wq, &wait);
1094 timeo = jiffies + (HZ / 2); /* FIXME */
1095 spin_lock(chip->mutex);
1096 continue;
1099 status = map_read(map, cmd_adr);
1100 if (map_word_andequal(map, status, status_OK, status_OK))
1101 break;
1103 /* OK Still waiting */
1104 if (time_after(jiffies, timeo)) {
1105 map_write(map, CMD(0x70), cmd_adr);
1106 chip->state = FL_STATUS;
1107 return -ETIME;
1110 /* Latency issues. Drop the lock, wait a while and retry */
1111 z++;
1112 spin_unlock(chip->mutex);
1113 cfi_udelay(1);
1114 spin_lock(chip->mutex);
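 /*
  * Descriptive note: adapt the caller's delay estimate for next time;
  * if the status poll never looped (z == 0) shorten *chip_op_time by
  * one (floor of 1), and if it looped more than once (z > 1) lengthen
  * it.
  */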
1117 if (!z) {
1118 if (!--(*chip_op_time))
1119 *chip_op_time = 1;
1120 } else if (z > 1)
1121 ++(*chip_op_time);
1123 /* Done and happy. */
1124 chip->state = FL_STATUS;
1125 return 0;
1128 #endif
1130 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1131 ({ int __udelay = (udelay); \
1132 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
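/*
 * Editor's note: the ({ ... }) statement expression makes WAIT_TIMEOUT
 * evaluate to the INVAL_CACHE_AND_WAIT() return value. The delay is
 * copied into a local because the wait helpers take an int * and may
 * adjust the value to tune later timeouts; that feedback is
 * intentionally discarded here.
 */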
1135 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1137 unsigned long cmd_addr;
1138 struct cfi_private *cfi = map->fldrv_priv;
1139 int ret = 0;
1141 adr += chip->start;
1143 /* Ensure cmd read/writes are aligned. */
1144 cmd_addr = adr & ~(map_bankwidth(map)-1);
1146 spin_lock(chip->mutex);
1148 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1150 if (!ret) {
1151 if (chip->state != FL_POINT && chip->state != FL_READY)
1152 map_write(map, CMD(0xff), cmd_addr);
1154 chip->state = FL_POINT;
1155 chip->ref_point_counter++;
1157 spin_unlock(chip->mutex);
1159 return ret;
1162 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1164 struct map_info *map = mtd->priv;
1165 struct cfi_private *cfi = map->fldrv_priv;
1166 unsigned long ofs;
1167 int chipnum;
1168 int ret = 0;
1170 if (!map->virt || (from + len > mtd->size))
1171 return -EINVAL;
1173 *mtdbuf = (void *)map->virt + from;
1174 *retlen = 0;
1176 /* Now lock the chip(s) to POINT state */
1178 /* ofs: offset within the first chip that the first read should start */
1179 chipnum = (from >> cfi->chipshift);
1180 ofs = from - (chipnum << cfi->chipshift);
1182 while (len) {
1183 unsigned long thislen;
1185 if (chipnum >= cfi->numchips)
1186 break;
1188 if ((len + ofs -1) >> cfi->chipshift)
1189 thislen = (1<<cfi->chipshift) - ofs;
1190 else
1191 thislen = len;
1193 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1194 if (ret)
1195 break;
1197 *retlen += thislen;
1198 len -= thislen;
1200 ofs = 0;
1201 chipnum++;
1203 return 0;
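/*
 * Worked example (editorial, assuming cfi->chipshift == 23, i.e. 8 MiB
 * chips): from = 0x900000 gives chipnum = 0x900000 >> 23 = 1 and
 * ofs = 0x900000 - (1 << 23) = 0x100000, so the request starts 1 MiB
 * into the second chip; thislen clips it at that chip's end and the
 * loop continues with ofs = 0 on the next chip.
 */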
1206 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1208 struct map_info *map = mtd->priv;
1209 struct cfi_private *cfi = map->fldrv_priv;
1210 unsigned long ofs;
1211 int chipnum;
1213 /* Now unlock the chip(s) POINT state */
1215 /* ofs: offset within the first chip that the first read should start */
1216 chipnum = (from >> cfi->chipshift);
1217 ofs = from - (chipnum << cfi->chipshift);
1219 while (len) {
1220 unsigned long thislen;
1221 struct flchip *chip;
1223 chip = &cfi->chips[chipnum];
1224 if (chipnum >= cfi->numchips)
1225 break;
1227 if ((len + ofs -1) >> cfi->chipshift)
1228 thislen = (1<<cfi->chipshift) - ofs;
1229 else
1230 thislen = len;
1232 spin_lock(chip->mutex);
1233 if (chip->state == FL_POINT) {
1234 chip->ref_point_counter--;
1235 if(chip->ref_point_counter == 0)
1236 chip->state = FL_READY;
1237 } else
1238 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1240 put_chip(map, chip, chip->start);
1241 spin_unlock(chip->mutex);
1243 len -= thislen;
1244 ofs = 0;
1245 chipnum++;
1249 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1251 unsigned long cmd_addr;
1252 struct cfi_private *cfi = map->fldrv_priv;
1253 int ret;
1255 adr += chip->start;
1257 /* Ensure cmd read/writes are aligned. */
1258 cmd_addr = adr & ~(map_bankwidth(map)-1);
1260 spin_lock(chip->mutex);
1261 ret = get_chip(map, chip, cmd_addr, FL_READY);
1262 if (ret) {
1263 spin_unlock(chip->mutex);
1264 return ret;
1267 if (chip->state != FL_POINT && chip->state != FL_READY) {
1268 map_write(map, CMD(0xff), cmd_addr);
1270 chip->state = FL_READY;
1273 map_copy_from(map, buf, adr, len);
1275 put_chip(map, chip, cmd_addr);
1277 spin_unlock(chip->mutex);
1278 return 0;
1281 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1283 struct map_info *map = mtd->priv;
1284 struct cfi_private *cfi = map->fldrv_priv;
1285 unsigned long ofs;
1286 int chipnum;
1287 int ret = 0;
1289 /* ofs: offset within the first chip that the first read should start */
1290 chipnum = (from >> cfi->chipshift);
1291 ofs = from - (chipnum << cfi->chipshift);
1293 *retlen = 0;
1295 while (len) {
1296 unsigned long thislen;
1298 if (chipnum >= cfi->numchips)
1299 break;
1301 if ((len + ofs -1) >> cfi->chipshift)
1302 thislen = (1<<cfi->chipshift) - ofs;
1303 else
1304 thislen = len;
1306 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1307 if (ret)
1308 break;
1310 *retlen += thislen;
1311 len -= thislen;
1312 buf += thislen;
1314 ofs = 0;
1315 chipnum++;
1317 return ret;
1320 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1321 unsigned long adr, map_word datum, int mode)
1323 struct cfi_private *cfi = map->fldrv_priv;
1324 map_word status, write_cmd;
1325 int ret=0;
1327 adr += chip->start;
1329 switch (mode) {
1330 case FL_WRITING:
1331 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1332 break;
1333 case FL_OTP_WRITE:
1334 write_cmd = CMD(0xc0);
1335 break;
1336 default:
1337 return -EINVAL;
1340 spin_lock(chip->mutex);
1341 ret = get_chip(map, chip, adr, mode);
1342 if (ret) {
1343 spin_unlock(chip->mutex);
1344 return ret;
1347 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1348 ENABLE_VPP(map);
1349 xip_disable(map, chip, adr);
1350 map_write(map, write_cmd, adr);
1351 map_write(map, datum, adr);
1352 chip->state = mode;
1354 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1355 adr, map_bankwidth(map),
1356 &chip->word_write_time);
1357 if (ret) {
1358 xip_enable(map, chip, adr);
1359 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1360 goto out;
1363 /* check for errors */
1364 status = map_read(map, adr);
1365 if (map_word_bitsset(map, status, CMD(0x1a))) {
1366 unsigned long chipstatus = MERGESTATUS(status);
1368 /* reset status */
1369 map_write(map, CMD(0x50), adr);
1370 map_write(map, CMD(0x70), adr);
1371 xip_enable(map, chip, adr);
1373 if (chipstatus & 0x02) {
1374 ret = -EROFS;
1375 } else if (chipstatus & 0x08) {
1376 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1377 ret = -EIO;
1378 } else {
1379 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1380 ret = -EINVAL;
1383 goto out;
1386 xip_enable(map, chip, adr);
1387 out: put_chip(map, chip, adr);
1388 spin_unlock(chip->mutex);
1389 return ret;
1393 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1395 struct map_info *map = mtd->priv;
1396 struct cfi_private *cfi = map->fldrv_priv;
1397 int ret = 0;
1398 int chipnum;
1399 unsigned long ofs;
1401 *retlen = 0;
1402 if (!len)
1403 return 0;
1405 chipnum = to >> cfi->chipshift;
1406 ofs = to - (chipnum << cfi->chipshift);
1408 /* If it's not bus-aligned, do the first byte write */
1409 if (ofs & (map_bankwidth(map)-1)) {
1410 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1411 int gap = ofs - bus_ofs;
1412 int n;
1413 map_word datum;
1415 n = min_t(int, len, map_bankwidth(map)-gap);
1416 datum = map_word_ff(map);
1417 datum = map_word_load_partial(map, datum, buf, gap, n);
1419 ret = do_write_oneword(map, &cfi->chips[chipnum],
1420 bus_ofs, datum, FL_WRITING);
1421 if (ret)
1422 return ret;
1424 len -= n;
1425 ofs += n;
1426 buf += n;
1427 (*retlen) += n;
1429 if (ofs >> cfi->chipshift) {
1430 chipnum ++;
1431 ofs = 0;
1432 if (chipnum == cfi->numchips)
1433 return 0;
1437 while(len >= map_bankwidth(map)) {
1438 map_word datum = map_word_load(map, buf);
1440 ret = do_write_oneword(map, &cfi->chips[chipnum],
1441 ofs, datum, FL_WRITING);
1442 if (ret)
1443 return ret;
1445 ofs += map_bankwidth(map);
1446 buf += map_bankwidth(map);
1447 (*retlen) += map_bankwidth(map);
1448 len -= map_bankwidth(map);
1450 if (ofs >> cfi->chipshift) {
1451 chipnum ++;
1452 ofs = 0;
1453 if (chipnum == cfi->numchips)
1454 return 0;
1458 if (len & (map_bankwidth(map)-1)) {
1459 map_word datum;
1461 datum = map_word_ff(map);
1462 datum = map_word_load_partial(map, datum, buf, 0, len);
1464 ret = do_write_oneword(map, &cfi->chips[chipnum],
1465 ofs, datum, FL_WRITING);
1466 if (ret)
1467 return ret;
1469 (*retlen) += len;
1472 return 0;
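/*
 * Worked example (editorial, map_bankwidth == 4): writing 7 bytes to
 * ofs = 0x102 first builds a bus word for bus_ofs = 0x100 with gap = 2
 * and n = 2, the unused bytes preloaded with 0xff so they do not
 * disturb already-programmed data; then one aligned word goes to
 * 0x104, and the single trailing byte at 0x108 is 0xff-padded the
 * same way.
 */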
1476 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1477 unsigned long adr, const struct kvec **pvec,
1478 unsigned long *pvec_seek, int len)
1480 struct cfi_private *cfi = map->fldrv_priv;
1481 map_word status, write_cmd, datum;
1482 unsigned long cmd_adr;
1483 int ret, wbufsize, word_gap, words;
1484 const struct kvec *vec;
1485 unsigned long vec_seek;
1486 unsigned long initial_adr;
1487 int initial_len = len;
1489 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1490 adr += chip->start;
1491 initial_adr = adr;
1492 cmd_adr = adr & ~(wbufsize-1);
1494 /* Let's determine this according to the interleave only once */
1495 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1497 spin_lock(chip->mutex);
1498 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1499 if (ret) {
1500 spin_unlock(chip->mutex);
1501 return ret;
1504 XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1505 ENABLE_VPP(map);
1506 xip_disable(map, chip, cmd_adr);
1508 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1509 [...], the device will not accept any more Write to Buffer commands".
1510 So we must check here and reset those bits if they're set. Otherwise
1511 we're just pissing in the wind */
1512 if (chip->state != FL_STATUS) {
1513 map_write(map, CMD(0x70), cmd_adr);
1514 chip->state = FL_STATUS;
1516 status = map_read(map, cmd_adr);
1517 if (map_word_bitsset(map, status, CMD(0x30))) {
1518 xip_enable(map, chip, cmd_adr);
1519 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1520 xip_disable(map, chip, cmd_adr);
1521 map_write(map, CMD(0x50), cmd_adr);
1522 map_write(map, CMD(0x70), cmd_adr);
1525 chip->state = FL_WRITING_TO_BUFFER;
1526 map_write(map, write_cmd, cmd_adr);
1527 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1528 if (ret) {
1529 /* Argh. Not ready for write to buffer */
1530 map_word Xstatus = map_read(map, cmd_adr);
1531 map_write(map, CMD(0x70), cmd_adr);
1532 chip->state = FL_STATUS;
1533 status = map_read(map, cmd_adr);
1534 map_write(map, CMD(0x50), cmd_adr);
1535 map_write(map, CMD(0x70), cmd_adr);
1536 xip_enable(map, chip, cmd_adr);
1537 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1538 map->name, Xstatus.x[0], status.x[0]);
1539 goto out;
1542 /* Figure out the number of words to write */
1543 word_gap = (-adr & (map_bankwidth(map)-1));
1544 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1545 if (!word_gap) {
1546 words--;
1547 } else {
1548 word_gap = map_bankwidth(map) - word_gap;
1549 adr -= word_gap;
1550 datum = map_word_ff(map);
1553 /* Write length of data to come */
1554 map_write(map, CMD(words), cmd_adr );
1556 /* Write data */
1557 vec = *pvec;
1558 vec_seek = *pvec_seek;
1559 do {
1560 int n = map_bankwidth(map) - word_gap;
1561 if (n > vec->iov_len - vec_seek)
1562 n = vec->iov_len - vec_seek;
1563 if (n > len)
1564 n = len;
1566 if (!word_gap && len < map_bankwidth(map))
1567 datum = map_word_ff(map);
1569 datum = map_word_load_partial(map, datum,
1570 vec->iov_base + vec_seek,
1571 word_gap, n);
1573 len -= n;
1574 word_gap += n;
1575 if (!len || word_gap == map_bankwidth(map)) {
1576 map_write(map, datum, adr);
1577 adr += map_bankwidth(map);
1578 word_gap = 0;
1581 vec_seek += n;
1582 if (vec_seek == vec->iov_len) {
1583 vec++;
1584 vec_seek = 0;
1586 } while (len);
1587 *pvec = vec;
1588 *pvec_seek = vec_seek;
1590 /* GO GO GO */
1591 map_write(map, CMD(0xd0), cmd_adr);
1592 chip->state = FL_WRITING;
1594 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1595 initial_adr, initial_len,
1596 &chip->buffer_write_time);
1597 if (ret) {
1598 map_write(map, CMD(0x70), cmd_adr);
1599 chip->state = FL_STATUS;
1600 xip_enable(map, chip, cmd_adr);
1601 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1602 goto out;
1605 /* check for errors */
1606 status = map_read(map, cmd_adr);
1607 if (map_word_bitsset(map, status, CMD(0x1a))) {
1608 unsigned long chipstatus = MERGESTATUS(status);
1610 /* reset status */
1611 map_write(map, CMD(0x50), cmd_adr);
1612 map_write(map, CMD(0x70), cmd_adr);
1613 xip_enable(map, chip, cmd_adr);
1615 if (chipstatus & 0x02) {
1616 ret = -EROFS;
1617 } else if (chipstatus & 0x08) {
1618 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1619 ret = -EIO;
1620 } else {
1621 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1622 ret = -EINVAL;
1625 goto out;
1628 xip_enable(map, chip, cmd_adr);
1629 out: put_chip(map, chip, cmd_adr);
1630 spin_unlock(chip->mutex);
1631 return ret;
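/*
 * Worked example (editorial, map_bankwidth == 4): a 10-byte vector at
 * adr = 0x206 gives word_gap = -0x206 & 3 = 2 and
 * words = (10 - 2 + 3) / 4 = 2; the chip is told the word count minus
 * one, so CMD(words) here means three bus words. word_gap is then
 * flipped to 4 - 2 = 2 and adr pulled back to 0x204, with the two
 * leading pad bytes loaded as 0xff.
 */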
1634 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1635 unsigned long count, loff_t to, size_t *retlen)
1637 struct map_info *map = mtd->priv;
1638 struct cfi_private *cfi = map->fldrv_priv;
1639 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1640 int ret = 0;
1641 int chipnum;
1642 unsigned long ofs, vec_seek, i;
1643 size_t len = 0;
1645 for (i = 0; i < count; i++)
1646 len += vecs[i].iov_len;
1648 *retlen = 0;
1649 if (!len)
1650 return 0;
1652 chipnum = to >> cfi->chipshift;
1653 ofs = to - (chipnum << cfi->chipshift);
1654 vec_seek = 0;
1656 do {
1657 /* We must not cross write block boundaries */
1658 int size = wbufsize - (ofs & (wbufsize-1));
1660 if (size > len)
1661 size = len;
1662 ret = do_write_buffer(map, &cfi->chips[chipnum],
1663 ofs, &vecs, &vec_seek, size);
1664 if (ret)
1665 return ret;
1667 ofs += size;
1668 (*retlen) += size;
1669 len -= size;
1671 if (ofs >> cfi->chipshift) {
1672 chipnum ++;
1673 ofs = 0;
1674 if (chipnum == cfi->numchips)
1675 return 0;
1678 /* Be nice and reschedule with the chip in a usable state for other
1679 processes. */
1680 cond_resched();
1682 } while (len);
1684 return 0;
1687 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1688 size_t len, size_t *retlen, const u_char *buf)
1690 struct kvec vec;
1692 vec.iov_base = (void *) buf;
1693 vec.iov_len = len;
1695 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1698 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1699 unsigned long adr, int len, void *thunk)
1701 struct cfi_private *cfi = map->fldrv_priv;
1702 map_word status;
1703 int retries = 3;
1704 int ret;
1706 adr += chip->start;
1708 retry:
1709 spin_lock(chip->mutex);
1710 ret = get_chip(map, chip, adr, FL_ERASING);
1711 if (ret) {
1712 spin_unlock(chip->mutex);
1713 return ret;
1716 XIP_INVAL_CACHED_RANGE(map, adr, len);
1717 ENABLE_VPP(map);
1718 xip_disable(map, chip, adr);
1720 /* Clear the status register first */
1721 map_write(map, CMD(0x50), adr);
1723 /* Now erase */
1724 map_write(map, CMD(0x20), adr);
1725 map_write(map, CMD(0xD0), adr);
1726 chip->state = FL_ERASING;
1727 chip->erase_suspended = 0;
1729 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1730 adr, len,
1731 &chip->erase_time);
1732 if (ret) {
1733 map_write(map, CMD(0x70), adr);
1734 chip->state = FL_STATUS;
1735 xip_enable(map, chip, adr);
1736 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1737 goto out;
1740 /* We've broken this before. It doesn't hurt to be safe */
1741 map_write(map, CMD(0x70), adr);
1742 chip->state = FL_STATUS;
1743 status = map_read(map, adr);
1745 /* check for errors */
1746 if (map_word_bitsset(map, status, CMD(0x3a))) {
1747 unsigned long chipstatus = MERGESTATUS(status);
1749 /* Reset the error bits */
1750 map_write(map, CMD(0x50), adr);
1751 map_write(map, CMD(0x70), adr);
1752 xip_enable(map, chip, adr);
1754 if ((chipstatus & 0x30) == 0x30) {
1755 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1756 ret = -EINVAL;
1757 } else if (chipstatus & 0x02) {
1758 /* Protection bit set */
1759 ret = -EROFS;
1760 } else if (chipstatus & 0x8) {
1761 /* Voltage */
1762 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1763 ret = -EIO;
1764 } else if (chipstatus & 0x20 && retries--) {
1765 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1766 put_chip(map, chip, adr);
1767 spin_unlock(chip->mutex);
1768 goto retry;
1769 } else {
1770 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1771 ret = -EIO;
1774 goto out;
1777 xip_enable(map, chip, adr);
1778 out: put_chip(map, chip, adr);
1779 spin_unlock(chip->mutex);
1780 return ret;
1783 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1785 unsigned long ofs, len;
1786 int ret;
1788 ofs = instr->addr;
1789 len = instr->len;
1791 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1792 if (ret)
1793 return ret;
1795 instr->state = MTD_ERASE_DONE;
1796 mtd_erase_callback(instr);
1798 return 0;
1801 static void cfi_intelext_sync (struct mtd_info *mtd)
1803 struct map_info *map = mtd->priv;
1804 struct cfi_private *cfi = map->fldrv_priv;
1805 int i;
1806 struct flchip *chip;
1807 int ret = 0;
1809 for (i=0; !ret && i<cfi->numchips; i++) {
1810 chip = &cfi->chips[i];
1812 spin_lock(chip->mutex);
1813 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1815 if (!ret) {
1816 chip->oldstate = chip->state;
1817 chip->state = FL_SYNCING;
1818 /* No need to wake_up() on this state change -
1819 * as the whole point is that nobody can do anything
1820 * with the chip now anyway.
1821 */
1823 spin_unlock(chip->mutex);
1826 /* Unlock the chips again */
1828 for (i--; i >=0; i--) {
1829 chip = &cfi->chips[i];
1831 spin_lock(chip->mutex);
1833 if (chip->state == FL_SYNCING) {
1834 chip->state = chip->oldstate;
1835 chip->oldstate = FL_READY;
1836 wake_up(&chip->wq);
1838 spin_unlock(chip->mutex);
1842 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1843 struct flchip *chip,
1844 unsigned long adr,
1845 int len, void *thunk)
1847 struct cfi_private *cfi = map->fldrv_priv;
1848 int status, ofs_factor = cfi->interleave * cfi->device_type;
1850 adr += chip->start;
1851 xip_disable(map, chip, adr+(2*ofs_factor));
1852 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1853 chip->state = FL_JEDEC_QUERY;
1854 status = cfi_read_query(map, adr+(2*ofs_factor));
1855 xip_enable(map, chip, 0);
1856 return status;
1859 #ifdef DEBUG_LOCK_BITS
1860 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1861 struct flchip *chip,
1862 unsigned long adr,
1863 int len, void *thunk)
1865 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1866 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1867 return 0;
1869 #endif
1871 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1872 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1874 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1875 unsigned long adr, int len, void *thunk)
1877 struct cfi_private *cfi = map->fldrv_priv;
1878 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1879 int udelay;
1880 int ret;
1882 adr += chip->start;
1884 spin_lock(chip->mutex);
1885 ret = get_chip(map, chip, adr, FL_LOCKING);
1886 if (ret) {
1887 spin_unlock(chip->mutex);
1888 return ret;
1891 ENABLE_VPP(map);
1892 xip_disable(map, chip, adr);
1894 map_write(map, CMD(0x60), adr);
1895 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1896 map_write(map, CMD(0x01), adr);
1897 chip->state = FL_LOCKING;
1898 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1899 map_write(map, CMD(0xD0), adr);
1900 chip->state = FL_UNLOCKING;
1901 } else
1902 BUG();
1904 /*
1905 * If Instant Individual Block Locking is supported then there is
1906 * no need to delay.
1907 */
1908 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000 : 0;
1910 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1911 if (ret) {
1912 map_write(map, CMD(0x70), adr);
1913 chip->state = FL_STATUS;
1914 xip_enable(map, chip, adr);
1915 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1916 goto out;
1919 xip_enable(map, chip, adr);
1920 out: put_chip(map, chip, adr);
1921 spin_unlock(chip->mutex);
1922 return ret;
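/*
 * The lock/unlock MTD entry points differ only in the thunk they pass
 * to cfi_varsize_frob(), which again applies do_xxlock_oneblock() to
 * every erase block in the requested range.
 */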
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

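/*
 * Intel flash parts expose one or more "protection register" fields: a
 * factory-programmed (and factory-locked) group plus user-programmable
 * OTP groups.  Their geometry comes from the extended query table
 * (ProtRegAddr, FactProtRegSize, UserProtRegSize, NumProtectionFields),
 * and each field carries a lock word in which a cleared bit N means
 * group N is permanently locked.
 */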
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

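/*
 * OTP writes go through do_write_oneword(), which operates on whole bus
 * words, so do_otp_write() splits an arbitrary byte range on bus-width
 * boundaries and pads each partial word with 0xFF (programming a 1 bit
 * leaves the flash cell untouched).  For example, on a 2-byte-wide bus,
 * writing 3 bytes at offset 1 becomes one word at offset 0 carrying a
 * single byte (gap = 1), then one full word at offset 2.
 */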
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

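/*
 * Locking an OTP group means programming its bit in the protection lock
 * word from 1 to 0; since flash programming can only clear bits, the
 * lock is irreversible.
 */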
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

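/*
 * Generic walker over all OTP regions of all (real, non-interleaved)
 * chips.  `action' is applied to the byte range [from, from+len) within
 * the concatenated OTP space; a NULL action instead fills `buf' with
 * one struct otp_info per group, reading each region's lock word to
 * report its locked state.
 */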
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is unsigned: check before
					   subtracting to avoid wraparound */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

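/*
 * The MTD OTP entry points below are thin wrappers around the walker;
 * the get_*_prot_info() variants pass a NULL action and return the
 * number of info bytes produced (note the `ret ? : retlen' idiom).
 */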
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

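/*
 * Parts that power up with all blocks locked (MTD_STUPID_LOCK) lose any
 * soft unlock state across a power cycle.  When the chip also supports
 * instant individual block locking (FeatureSupport bit 5), the lock
 * state of every block is snapshotted into region->lockmap on suspend,
 * and blocks recorded as unlocked are unlocked again on resume.
 */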
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

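/*
 * Counterpart of cfi_intelext_save_locks(): after a resume, every block
 * whose lockmap bit is clear (i.e. it was soft-unlocked before suspend)
 * is unlocked again; blocks recorded as locked are simply left in their
 * power-up locked state.
 */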
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

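/*
 * The reboot notifier (unregistered in cfi_intelext_destroy() below)
 * reuses the same reset path so that a soft reboot always finds the
 * flash in array mode, 0xFF being Intel's "read array" opcode.
 */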
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);	/* kfree(NULL) is a no-op */
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");