/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
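/*
 * Note (illustrative): the cast above is valid because 'mtd' is the first
 * member of struct mtd_part, so a pointer to the embedded mtd_info also
 * points at the containing mtd_part.
 */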

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
				 len, retlen, buf);
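	/*
	 * If the master device reported corrected bitflips or an ECC failure,
	 * credit the delta of its ECC counters to this partition's stats.
	 */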
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
	}
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return part->master->point (part->master, from + part->offset,
				    len, retlen, virt, phys);
}

static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	part->master->unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->get_unmapped_area(part->master, len, offset,
					       flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = part->master->read_oob(part->master, from + part->offset, ops);
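	/*
	 * Unlike part_read(), account at most one corrected/failed event
	 * per OOB read against this partition's ECC statistics.
	 */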
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				   len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
					 len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
				    to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = part->master->erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
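
/*
 * The erase was issued with a master-relative address; when the master
 * completes it, translate addr/fail_addr back to partition-relative
 * offsets before invoking the caller's callback.
 */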
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}
EXPORT_SYMBOL(del_mtd_partitions);

static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		uint64_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->get_unmapped_area)
		slave->mtd.get_unmapped_area = part_get_unmapped_area;
	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && !master->dev.class && master->suspend && master->resume) {
		slave->mtd.suspend = part_suspend;
		slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->is_locked)
		slave->mtd.is_locked = part_is_locked;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
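	/*
	 * Resolve the requested offset: MTDPART_OFS_APPEND places the
	 * partition right after the previous one, MTDPART_OFS_NXTBLK
	 * additionally rounds it up to the next erase block boundary.
	 */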
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	/* register our partition */
	add_mtd_device(&slave->mtd);

	return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */
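/*
 * Illustrative usage sketch (not from this file): a board driver would
 * typically describe its layout with a static table and hand it to
 * add_mtd_partitions() after probing the master device. The names and
 * sizes below are hypothetical.
 *
 *	static struct mtd_partition example_parts[] = {
 *		{ .name = "bootloader", .offset = 0,                  .size = SZ_256K },
 *		{ .name = "kernel",     .offset = MTDPART_OFS_APPEND, .size = SZ_4M   },
 *		{ .name = "rootfs",     .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 */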
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
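
/*
 * Registry of partition-table parsers (e.g. command-line or on-flash
 * formats). The list is protected by part_parser_lock, and looking a
 * parser up takes a module reference via try_module_get() so it cannot
 * be unloaded while in use.
 */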
static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
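
/*
 * Try each parser named in the NULL-terminated 'types' array in turn,
 * loading its module on demand if necessary, until one of them reports
 * a positive number of partitions. The resulting table is returned via
 * 'pparts' and the count as the return value.
 */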
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts, unsigned long origin)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser) {
			printk(KERN_NOTICE "%s partition parsing not available\n",
			       *types);
			continue;
		}
		ret = (*parser->parse_fn)(master, pparts, origin);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);