JFFS for WNR3500Lv2
[tomato.git] / release / src-rt / linux / linux-2.6 / drivers / mtd / mtdpart.c
blob 21608fc54ba0216a0f8852a4e5c992df5f25087e
1 /*
2 * Simple MTD partitioning layer
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
6 * This code is GPL
8 * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
10 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
11 * added support for read_oob, write_oob
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/kmod.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/compatmac.h>
23 #include <asm/div64.h>
25 #define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
27 /* Our partition linked list */
28 static LIST_HEAD(mtd_partitions);
30 /* Our partition node structure */
31 struct mtd_part {
32 struct mtd_info mtd; /* embedded MTD object handed to upper layers */
33 struct mtd_info *master; /* the real device this partition lives on */
34 u_int32_t offset; /* partition start, in bytes, within the master */
35 int index; /* position of this partition in the partition table */
36 struct list_head list; /* link in the global mtd_partitions list */
37 int registered; /* non-zero if add_mtd_device() was called for mtd */
41 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
42 * the pointer to that structure with this macro.
/* Valid only because 'mtd' is the first member of struct mtd_part. */
44 #define PART(x) ((struct mtd_part *)(x))
48 * MTD methods which simply translate the effective address and pass through
49 * to the _real_ device.
52 static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
53 size_t *retlen, u_char *buf)
55 struct mtd_part *part = PART(mtd);
56 int res;
58 if (from >= mtd->size)
59 len = 0;
60 else if (from + len > mtd->size)
61 len = mtd->size - from;
62 res = part->master->read (part->master, from + part->offset,
63 len, retlen, buf);
64 if (unlikely(res)) {
65 if (res == -EUCLEAN)
66 mtd->ecc_stats.corrected++;
67 if (res == -EBADMSG)
68 mtd->ecc_stats.failed++;
70 return res;
73 static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
74 size_t *retlen, u_char **buf)
76 struct mtd_part *part = PART(mtd);
77 if (from >= mtd->size)
78 len = 0;
79 else if (from + len > mtd->size)
80 len = mtd->size - from;
81 return part->master->point (part->master, from + part->offset,
82 len, retlen, buf);
85 static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
87 struct mtd_part *part = PART(mtd);
89 part->master->unpoint (part->master, addr, from + part->offset, len);
92 static int part_read_oob(struct mtd_info *mtd, loff_t from,
93 struct mtd_oob_ops *ops)
95 struct mtd_part *part = PART(mtd);
96 int res;
98 if (from >= mtd->size)
99 return -EINVAL;
100 if (ops->datbuf && from + ops->len > mtd->size)
101 return -EINVAL;
102 res = part->master->read_oob(part->master, from + part->offset, ops);
104 if (unlikely(res)) {
105 if (res == -EUCLEAN)
106 mtd->ecc_stats.corrected++;
107 if (res == -EBADMSG)
108 mtd->ecc_stats.failed++;
110 return res;
113 static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
114 size_t *retlen, u_char *buf)
116 struct mtd_part *part = PART(mtd);
117 return part->master->read_user_prot_reg (part->master, from,
118 len, retlen, buf);
121 static int part_get_user_prot_info (struct mtd_info *mtd,
122 struct otp_info *buf, size_t len)
124 struct mtd_part *part = PART(mtd);
125 return part->master->get_user_prot_info (part->master, buf, len);
128 static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
129 size_t *retlen, u_char *buf)
131 struct mtd_part *part = PART(mtd);
132 return part->master->read_fact_prot_reg (part->master, from,
133 len, retlen, buf);
136 static int part_get_fact_prot_info (struct mtd_info *mtd,
137 struct otp_info *buf, size_t len)
139 struct mtd_part *part = PART(mtd);
140 return part->master->get_fact_prot_info (part->master, buf, len);
143 static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
144 size_t *retlen, const u_char *buf)
146 struct mtd_part *part = PART(mtd);
147 if (!(mtd->flags & MTD_WRITEABLE))
148 return -EROFS;
149 if (to >= mtd->size)
150 len = 0;
151 else if (to + len > mtd->size)
152 len = mtd->size - to;
153 return part->master->write (part->master, to + part->offset,
154 len, retlen, buf);
157 static int part_write_oob(struct mtd_info *mtd, loff_t to,
158 struct mtd_oob_ops *ops)
160 struct mtd_part *part = PART(mtd);
162 if (!(mtd->flags & MTD_WRITEABLE))
163 return -EROFS;
165 if (to >= mtd->size)
166 return -EINVAL;
167 if (ops->datbuf && to + ops->len > mtd->size)
168 return -EINVAL;
169 return part->master->write_oob(part->master, to + part->offset, ops);
172 static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
173 size_t *retlen, u_char *buf)
175 struct mtd_part *part = PART(mtd);
176 return part->master->write_user_prot_reg (part->master, from,
177 len, retlen, buf);
180 static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
182 struct mtd_part *part = PART(mtd);
183 return part->master->lock_user_prot_reg (part->master, from, len);
186 static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
187 unsigned long count, loff_t to, size_t *retlen)
189 struct mtd_part *part = PART(mtd);
190 if (!(mtd->flags & MTD_WRITEABLE))
191 return -EROFS;
192 return part->master->writev (part->master, vecs, count,
193 to + part->offset, retlen);
196 static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
/* Erase a range of the partition.  If the partition is not aligned to the
 * master's erase block (MTD_ERASE_PARTIAL), the block data outside the
 * partial region is snapshotted into instr->erase_buf here and written
 * back by mtd_erase_callback() after the master erase completes. */
198 struct mtd_part *part = PART(mtd);
199 int ret;
/* Refuse erases on read-only partitions or past the partition end. */
200 if (!(mtd->flags & MTD_WRITEABLE))
201 return -EROFS;
202 if (instr->addr >= mtd->size)
203 return -EINVAL;
205 instr->partial_start = false;
206 if (mtd->flags & MTD_ERASE_PARTIAL) {
207 size_t readlen = 0;
208 u64 mtd_ofs;
/* Bounce buffer: one full master erase block, so the data that shares
 * the block with the partition edge can be preserved. */
210 instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
211 if (!instr->erase_buf)
212 return -ENOMEM;
/* erase_buf_ofs = offset of the erase start within its master block. */
214 mtd_ofs = part->offset + instr->addr;
215 instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
217 if (instr->erase_buf_ofs > 0) {
/* Unaligned start: pull addr back to the block boundary and snapshot
 * the whole block before it is erased. */
218 instr->addr -= instr->erase_buf_ofs;
219 ret = part->master->read(part->master,
220 instr->addr + part->offset,
221 part->master->erasesize,
222 &readlen, instr->erase_buf);
224 instr->partial_start = true;
225 } else {
/* Aligned start: check instead whether the partition END is unaligned;
 * if so, extend len and snapshot the last (shared) block. */
226 mtd_ofs = part->offset + part->mtd.size;
227 instr->erase_buf_ofs = part->master->erasesize -
228 do_div(mtd_ofs, part->master->erasesize);
230 if (instr->erase_buf_ofs > 0) {
231 instr->len += instr->erase_buf_ofs;
232 ret = part->master->read(part->master,
233 part->offset + instr->addr +
234 instr->len - part->master->erasesize,
235 part->master->erasesize, &readlen,
236 instr->erase_buf);
237 } else {
238 ret = 0;
/* Abort (and free the bounce buffer) if the snapshot read failed. */
241 if (ret < 0) {
242 kfree(instr->erase_buf);
243 return ret;
/* Translate into master address space and start the erase; the saved
 * data is restored later in mtd_erase_callback(). */
248 instr->addr += part->offset;
249 ret = part->master->erase(part->master, instr);
/* On synchronous failure, convert addresses back to partition-relative
 * values for the caller; 0xffffffff means "fail address unknown". */
250 if (ret) {
251 if (instr->fail_addr != 0xffffffff)
252 instr->fail_addr -= part->offset;
253 instr->addr -= part->offset;
254 if (mtd->flags & MTD_ERASE_PARTIAL)
255 kfree(instr->erase_buf);
258 return ret;
261 void mtd_erase_callback(struct erase_info *instr)
/* Erase-completion hook.  For erases issued via part_erase() it restores
 * the data preserved for unaligned (MTD_ERASE_PARTIAL) partitions,
 * translates addresses back to partition-relative values, then invokes
 * the caller's own completion callback, if any. */
263 if (instr->mtd->erase == part_erase) {
264 struct mtd_part *part = PART(instr->mtd);
265 size_t wrlen = 0;
267 if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
268 if (instr->partial_start) {
/* Unaligned start: rewrite the saved head of the erase block, then
 * advance addr so it points at the requested start again. */
269 part->master->write(part->master,
270 instr->addr, instr->erase_buf_ofs,
271 &wrlen, instr->erase_buf);
272 instr->addr += instr->erase_buf_ofs;
273 } else {
/* Unaligned end: shrink len back to the requested size and rewrite the
 * saved tail from the end of the bounce buffer. */
274 instr->len -= instr->erase_buf_ofs;
275 part->master->write(part->master,
276 instr->addr + instr->len,
277 instr->erase_buf_ofs, &wrlen,
278 instr->erase_buf +
279 part->master->erasesize -
280 instr->erase_buf_ofs);
282 kfree(instr->erase_buf);
/* Convert master-relative addresses back for the partition's caller;
 * 0xffffffff marks an unknown fail address. */
284 if (instr->fail_addr != 0xffffffff)
285 instr->fail_addr -= part->offset;
286 instr->addr -= part->offset;
288 if (instr->callback)
289 instr->callback(instr);
291 EXPORT_SYMBOL_GPL(mtd_erase_callback);
293 static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
295 struct mtd_part *part = PART(mtd);
296 if ((len + ofs) > mtd->size)
297 return -EINVAL;
298 return part->master->lock(part->master, ofs + part->offset, len);
301 static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
303 struct mtd_part *part = PART(mtd);
304 if ((len + ofs) > mtd->size)
305 return -EINVAL;
306 return part->master->unlock(part->master, ofs + part->offset, len);
309 static void part_sync(struct mtd_info *mtd)
311 struct mtd_part *part = PART(mtd);
312 part->master->sync(part->master);
315 static int part_suspend(struct mtd_info *mtd)
317 struct mtd_part *part = PART(mtd);
318 return part->master->suspend(part->master);
321 static void part_resume(struct mtd_info *mtd)
323 struct mtd_part *part = PART(mtd);
324 part->master->resume(part->master);
327 static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
329 struct mtd_part *part = PART(mtd);
330 if (ofs >= mtd->size)
331 return -EINVAL;
332 ofs += part->offset;
333 return part->master->block_isbad(part->master, ofs);
336 static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
338 struct mtd_part *part = PART(mtd);
339 int res;
341 if (!(mtd->flags & MTD_WRITEABLE))
342 return -EROFS;
343 if (ofs >= mtd->size)
344 return -EINVAL;
345 ofs += part->offset;
346 res = part->master->block_markbad(part->master, ofs);
347 if (!res)
348 mtd->ecc_stats.badblocks++;
349 return res;
353 * This function unregisters and destroy all slave MTD objects which are
354 * attached to the given master MTD object.
357 int del_mtd_partitions(struct mtd_info *master)
359 struct mtd_part *slave, *next;
361 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
362 if (slave->master == master) {
363 list_del(&slave->list);
364 if(slave->registered)
365 del_mtd_device(&slave->mtd);
366 kfree(slave);
369 return 0;
373 * This function, given a master MTD object and a partition table, creates
374 * and registers slave MTD objects which are bound to the master according to
375 * the partition definitions.
376 * (Q: should we register the master MTD object as well?)
/* Returns 0 on success or -ENOMEM; on allocation failure all slaves
 * created so far are torn down again via del_mtd_partitions(). */
379 int add_mtd_partitions(struct mtd_info *master,
380 const struct mtd_partition *parts,
381 int nbparts)
383 struct mtd_part *slave;
384 u_int32_t cur_offset = 0; /* running end offset for OFS_APPEND/NXTBLK */
385 int i;
387 printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
389 for (i = 0; i < nbparts; i++) {
391 /* allocate the partition structure */
392 slave = kzalloc (sizeof(*slave), GFP_KERNEL);
393 if (!slave) {
394 printk ("memory allocation error while creating partitions for \"%s\"\n",
395 master->name);
396 del_mtd_partitions(master);
397 return -ENOMEM;
399 list_add(&slave->list, &mtd_partitions);
401 /* set up the MTD object for this partition */
/* Geometry and capabilities are inherited from the master, with the
 * per-partition mask_flags stripped off the flags. */
402 slave->mtd.type = master->type;
403 slave->mtd.flags = master->flags & ~parts[i].mask_flags;
404 slave->mtd.size = parts[i].size;
405 slave->mtd.writesize = master->writesize;
406 slave->mtd.oobsize = master->oobsize;
407 slave->mtd.oobavail = master->oobavail;
408 slave->mtd.subpage_sft = master->subpage_sft;
410 slave->mtd.name = parts[i].name;
411 slave->mtd.owner = master->owner;
/* Install the part_* wrappers; optional ops are installed only when the
 * master actually provides them. */
413 slave->mtd.read = part_read;
414 slave->mtd.write = part_write;
416 if(master->point && master->unpoint){
417 slave->mtd.point = part_point;
418 slave->mtd.unpoint = part_unpoint;
421 if (master->read_oob)
422 slave->mtd.read_oob = part_read_oob;
423 if (master->write_oob)
424 slave->mtd.write_oob = part_write_oob;
425 if(master->read_user_prot_reg)
426 slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
427 if(master->read_fact_prot_reg)
428 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
429 if(master->write_user_prot_reg)
430 slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
431 if(master->lock_user_prot_reg)
432 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
433 if(master->get_user_prot_info)
434 slave->mtd.get_user_prot_info = part_get_user_prot_info;
435 if(master->get_fact_prot_info)
436 slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
437 if (master->sync)
438 slave->mtd.sync = part_sync;
/* Suspend/resume act on the whole master, so only the first partition
 * gets the hooks — avoids suspending the device once per partition. */
439 if (!i && master->suspend && master->resume) {
440 slave->mtd.suspend = part_suspend;
441 slave->mtd.resume = part_resume;
443 if (master->writev)
444 slave->mtd.writev = part_writev;
445 if (master->lock)
446 slave->mtd.lock = part_lock;
447 if (master->unlock)
448 slave->mtd.unlock = part_unlock;
449 if (master->block_isbad)
450 slave->mtd.block_isbad = part_block_isbad;
451 if (master->block_markbad)
452 slave->mtd.block_markbad = part_block_markbad;
453 slave->mtd.erase = part_erase;
454 slave->master = master;
455 slave->offset = parts[i].offset;
456 slave->index = i;
/* Resolve symbolic offsets: APPEND = right after the previous partition,
 * NXTBLK = same, but rounded up to the next erase block boundary. */
458 if (slave->offset == MTDPART_OFS_APPEND)
459 slave->offset = cur_offset;
460 if (slave->offset == MTDPART_OFS_NXTBLK) {
461 slave->offset = cur_offset;
462 if ((cur_offset % master->erasesize) != 0) {
463 /* Round up to next erasesize */
464 slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
465 printk(KERN_NOTICE "Moving partition %d: "
466 "0x%08x -> 0x%08x\n", i,
467 cur_offset, slave->offset);
/* SIZ_FULL = everything from the offset to the end of the master. */
470 if (slave->mtd.size == MTDPART_SIZ_FULL)
471 slave->mtd.size = master->size - slave->offset;
472 cur_offset = slave->offset + slave->mtd.size;
474 printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
475 slave->offset + slave->mtd.size, slave->mtd.name);
477 /* let's do some sanity checks */
478 if (slave->offset >= master->size) {
479 /* let's register it anyway to preserve ordering */
480 slave->offset = 0;
481 slave->mtd.size = 0;
482 printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
483 parts[i].name);
485 if (slave->offset + slave->mtd.size > master->size) {
486 slave->mtd.size = master->size - slave->offset;
487 printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
488 parts[i].name, master->name, slave->mtd.size);
490 if (master->numeraseregions>1) {
491 /* Deal with variable erase size stuff */
492 int i;
493 struct mtd_erase_region_info *regions = master->eraseregions;
495 /* Find the first erase regions which is part of this partition. */
496 for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
/* The partition's erasesize becomes the LARGEST erase size among all
 * regions it spans, so erase alignment checks below stay safe. */
499 for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
500 if (slave->mtd.erasesize < regions[i].erasesize) {
501 slave->mtd.erasesize = regions[i].erasesize;
504 } else {
505 /* Single erase size */
506 slave->mtd.erasesize = master->erasesize;
/* Unaligned writable partition: flag it for the partial-erase path in
 * part_erase().  A partition that is unaligned AND larger than one
 * erase block cannot be handled that way and is forced read-only;
 * a smaller one pretends its whole size is one erase block. */
509 if ((slave->mtd.flags & MTD_WRITEABLE) &&
510 ((slave->offset % slave->mtd.erasesize) ||
511 ((slave->offset + slave->mtd.size) % slave->mtd.erasesize))) {
512 /* Doesn't start or end on a boundary of major erase size */
513 slave->mtd.flags |= MTD_ERASE_PARTIAL;
515 if (((u32) slave->mtd.size) > master->erasesize)
516 slave->mtd.flags &= ~MTD_WRITEABLE;
517 else
518 slave->mtd.erasesize = slave->mtd.size;
520 if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
521 printk(KERN_WARNING "mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
522 parts[i].name);
524 slave->mtd.ecclayout = master->ecclayout;
/* Pre-count the bad blocks inside this partition's range so the slave's
 * ecc_stats start out accurate. */
525 if (master->block_isbad) {
526 uint32_t offs = 0;
528 while(offs < slave->mtd.size) {
529 if (master->block_isbad(master,
530 offs + slave->offset))
531 slave->mtd.ecc_stats.badblocks++;
532 offs += slave->mtd.erasesize;
536 if(parts[i].mtdp)
537 { /* store the object pointer (caller may or may not register it */
538 *parts[i].mtdp = &slave->mtd;
539 slave->registered = 0;
541 else
543 /* register our partition */
544 add_mtd_device(&slave->mtd);
545 slave->registered = 1;
549 return 0;
552 EXPORT_SYMBOL(add_mtd_partitions);
553 EXPORT_SYMBOL(del_mtd_partitions);
/* Registry of partition-table parsers; the list is guarded by the lock. */
555 static DEFINE_SPINLOCK(part_parser_lock);
556 static LIST_HEAD(part_parsers);
558 static struct mtd_part_parser *get_partition_parser(const char *name)
560 struct mtd_part_parser *p, *ret = NULL;
562 spin_lock(&part_parser_lock);
564 list_for_each_entry(p, &part_parsers, list)
565 if (!strcmp(p->name, name) && try_module_get(p->owner)) {
566 ret = p;
567 break;
570 spin_unlock(&part_parser_lock);
572 return ret;
575 int register_mtd_parser(struct mtd_part_parser *p)
577 spin_lock(&part_parser_lock);
578 list_add(&p->list, &part_parsers);
579 spin_unlock(&part_parser_lock);
581 return 0;
584 int deregister_mtd_parser(struct mtd_part_parser *p)
586 spin_lock(&part_parser_lock);
587 list_del(&p->list);
588 spin_unlock(&part_parser_lock);
589 return 0;
592 int parse_mtd_partitions(struct mtd_info *master, const char **types,
593 struct mtd_partition **pparts, unsigned long origin)
595 struct mtd_part_parser *parser;
596 int ret = 0;
598 for ( ; ret <= 0 && *types; types++) {
599 parser = get_partition_parser(*types);
600 #ifdef CONFIG_KMOD
601 if (!parser && !request_module("%s", *types))
602 parser = get_partition_parser(*types);
603 #endif
604 if (!parser) {
605 printk(KERN_NOTICE "%s partition parsing not available\n",
606 *types);
607 continue;
609 ret = (*parser->parse_fn)(master, pparts, origin);
610 if (ret > 0) {
611 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
612 ret, parser->name, master->name);
614 put_partition_parser(parser);
616 return ret;
619 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
620 EXPORT_SYMBOL_GPL(register_mtd_parser);
621 EXPORT_SYMBOL_GPL(deregister_mtd_parser);