2 * Simple MTD partitioning layer
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
8 * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
10 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
11 * added support for read_oob, write_oob
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/kmod.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/compatmac.h>
23 #include <asm/div64.h>
/* Internal flag: the partition does not start/end on master erase-block
 * boundaries, so erases must read-modify-write the partial blocks. */
25 #define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
27 /* Our partition linked list */
28 static LIST_HEAD(mtd_partitions
);
30 /* Our partition node structure */
/* NOTE(review): the struct declaration is incomplete in this view -- the
 * 'struct mtd_part {' opener and several members (mtd, offset, index,
 * registered) referenced later in this file are missing here. */
33 struct mtd_info
*master
;
36 struct list_head list
;
41 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
42 * the pointer to that structure with this macro.
/* Works because the embedded struct mtd_info is the first member, so the
 * mtd_info pointer and the mtd_part pointer share an address. */
44 #define PART(x) ((struct mtd_part *)(x))
48 * MTD methods which simply translate the effective address and pass through
49 * to the _real_ device.
/* part_read: read 'len' bytes at partition-relative 'from' by delegating to
 * the master device at from + part->offset.  Mirrors ECC statistics from the
 * master read into this partition's mtd->ecc_stats.
 * NOTE(review): interior lines (braces, 'res' declaration, error returns,
 * result handling) are missing from this view -- do not build as-is. */
52 static int part_read (struct mtd_info
*mtd
, loff_t from
, size_t len
,
53 size_t *retlen
, u_char
*buf
)
55 struct mtd_part
*part
= PART(mtd
);
/* Reject reads starting at/past the end of the partition. */
58 if (from
>= mtd
->size
)
/* Clamp reads that run past the end of the partition. */
60 else if (from
+ len
> mtd
->size
)
61 len
= mtd
->size
- from
;
/* Delegate to the master device at the translated absolute offset. */
62 res
= part
->master
->read (part
->master
, from
+ part
->offset
,
/* Propagate per-read ECC outcome into this slave's statistics. */
66 mtd
->ecc_stats
.corrected
++;
68 mtd
->ecc_stats
.failed
++;
/* part_point: map ("point to") a region of the partition for direct access.
 * Clamps the request to the partition size and forwards to the master's
 * point() at the translated offset.
 * NOTE(review): braces and the error-return line for the bounds check are
 * missing from this view. */
73 static int part_point (struct mtd_info
*mtd
, loff_t from
, size_t len
,
74 size_t *retlen
, u_char
**buf
)
76 struct mtd_part
*part
= PART(mtd
);
/* Out-of-range start: the original rejects this before delegating. */
77 if (from
>= mtd
->size
)
/* Clamp requests that extend past the end of the partition. */
79 else if (from
+ len
> mtd
->size
)
80 len
= mtd
->size
- from
;
81 return part
->master
->point (part
->master
, from
+ part
->offset
,
/* part_unpoint: release a mapping created by part_point(); pure pass-through
 * to the master with the partition offset applied. */
85 static void part_unpoint (struct mtd_info
*mtd
, u_char
*addr
, loff_t from
, size_t len
)
87 struct mtd_part
*part
= PART(mtd
);
89 part
->master
->unpoint (part
->master
, addr
, from
+ part
->offset
, len
);
/* part_read_oob: out-of-band read; bounds-checks against the partition,
 * delegates to the master's read_oob() at the translated offset, and
 * mirrors ECC statistics into this partition.
 * NOTE(review): braces, 'res' declaration, and the error-return lines for
 * both bounds checks are missing from this view. */
92 static int part_read_oob(struct mtd_info
*mtd
, loff_t from
,
93 struct mtd_oob_ops
*ops
)
95 struct mtd_part
*part
= PART(mtd
);
/* Start offset must lie inside the partition. */
98 if (from
>= mtd
->size
)
/* With a data buffer attached, the data span must also fit. */
100 if (ops
->datbuf
&& from
+ ops
->len
> mtd
->size
)
102 res
= part
->master
->read_oob(part
->master
, from
+ part
->offset
, ops
);
/* Propagate ECC outcome of the master read into this slave's stats. */
106 mtd
->ecc_stats
.corrected
++;
108 mtd
->ecc_stats
.failed
++;
/* part_read_user_prot_reg: read the user OTP area.  OTP registers are a
 * device-global resource, so 'from' is passed through UNtranslated (no
 * partition offset is added). */
113 static int part_read_user_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
,
114 size_t *retlen
, u_char
*buf
)
116 struct mtd_part
*part
= PART(mtd
);
117 return part
->master
->read_user_prot_reg (part
->master
, from
,
/* part_get_user_prot_info: query user OTP region layout; straight
 * pass-through to the master (OTP info is device-global). */
121 static int part_get_user_prot_info (struct mtd_info
*mtd
,
122 struct otp_info
*buf
, size_t len
)
124 struct mtd_part
*part
= PART(mtd
);
125 return part
->master
->get_user_prot_info (part
->master
, buf
, len
);
/* part_read_fact_prot_reg: read the factory OTP area; like the user-OTP
 * variant, 'from' is device-global and is not offset by the partition. */
128 static int part_read_fact_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
,
129 size_t *retlen
, u_char
*buf
)
131 struct mtd_part
*part
= PART(mtd
);
132 return part
->master
->read_fact_prot_reg (part
->master
, from
,
/* part_get_fact_prot_info: query factory OTP region layout; straight
 * pass-through to the master device. */
136 static int part_get_fact_prot_info (struct mtd_info
*mtd
,
137 struct otp_info
*buf
, size_t len
)
139 struct mtd_part
*part
= PART(mtd
);
140 return part
->master
->get_fact_prot_info (part
->master
, buf
, len
);
/* part_write: write 'len' bytes at partition-relative 'to'.  Refuses
 * read-only partitions, clamps to the partition size, then delegates to
 * the master at the translated offset.
 * NOTE(review): braces and the error-return lines (e.g. -EROFS for the
 * read-only case) are missing from this view. */
143 static int part_write (struct mtd_info
*mtd
, loff_t to
, size_t len
,
144 size_t *retlen
, const u_char
*buf
)
146 struct mtd_part
*part
= PART(mtd
);
/* Reject writes to partitions whose writable flag was masked off. */
147 if (!(mtd
->flags
& MTD_WRITEABLE
))
/* Clamp writes that run past the end of the partition. */
151 else if (to
+ len
> mtd
->size
)
152 len
= mtd
->size
- to
;
153 return part
->master
->write (part
->master
, to
+ part
->offset
,
/* part_write_oob: out-of-band write; checks writability and partition
 * bounds before delegating to the master's write_oob().
 * NOTE(review): braces and the error-return lines of both checks are
 * missing from this view. */
157 static int part_write_oob(struct mtd_info
*mtd
, loff_t to
,
158 struct mtd_oob_ops
*ops
)
160 struct mtd_part
*part
= PART(mtd
);
/* Reject writes on read-only partitions. */
162 if (!(mtd
->flags
& MTD_WRITEABLE
))
/* With a data buffer attached, the data span must fit the partition. */
167 if (ops
->datbuf
&& to
+ ops
->len
> mtd
->size
)
169 return part
->master
->write_oob(part
->master
, to
+ part
->offset
, ops
);
/* part_write_user_prot_reg: write the user OTP area; OTP addresses are
 * device-global, so no partition offset is applied. */
172 static int part_write_user_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
,
173 size_t *retlen
, u_char
*buf
)
175 struct mtd_part
*part
= PART(mtd
);
176 return part
->master
->write_user_prot_reg (part
->master
, from
,
/* part_lock_user_prot_reg: permanently lock a user OTP region; straight
 * pass-through (OTP is device-global, no offset translation). */
180 static int part_lock_user_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
)
182 struct mtd_part
*part
= PART(mtd
);
183 return part
->master
->lock_user_prot_reg (part
->master
, from
, len
);
/* part_writev: vectored write; rejects read-only partitions and delegates
 * to the master's writev() with the destination offset translated.
 * NOTE(review): braces and the read-only error-return line are missing
 * from this view. */
186 static int part_writev (struct mtd_info
*mtd
, const struct kvec
*vecs
,
187 unsigned long count
, loff_t to
, size_t *retlen
)
189 struct mtd_part
*part
= PART(mtd
);
/* Reject writes on read-only partitions. */
190 if (!(mtd
->flags
& MTD_WRITEABLE
))
192 return part
->master
->writev (part
->master
, vecs
, count
,
193 to
+ part
->offset
, retlen
);
/* part_erase: erase a range of the partition.
 *
 * For normal partitions this just offsets instr->addr into the master,
 * calls the master's erase(), and translates addr/fail_addr back to
 * partition-relative values afterwards.
 *
 * For MTD_ERASE_PARTIAL partitions (boundaries not aligned to the master
 * erase block) it additionally allocates instr->erase_buf and pre-reads
 * the partial head and/or tail erase blocks so the completion callback
 * (mtd_erase_callback below) can restore the bytes outside the partition
 * after the full master block is erased.
 *
 * NOTE(review): many interior lines are missing from this view: braces,
 * declarations of ret/readlen/mtd_ofs, error returns (-EROFS/-EINVAL/
 * -ENOMEM), read-failure handling, and the tail-read buffer argument.
 * Reconstruct from the pristine mtdpart.c before building. */
196 static int part_erase (struct mtd_info
*mtd
, struct erase_info
*instr
)
198 struct mtd_part
*part
= PART(mtd
);
/* Read-only partitions cannot be erased. */
200 if (!(mtd
->flags
& MTD_WRITEABLE
))
/* Erase must start inside the partition. */
202 if (instr
->addr
>= mtd
->size
)
205 instr
->partial_start
= false;
/* Partial-block partitions need read-modify-write bookkeeping. */
206 if (mtd
->flags
& MTD_ERASE_PARTIAL
) {
/* Scratch buffer holding one full master erase block.  GFP_ATOMIC:
 * presumably because this path may run without sleeping -- verify. */
210 instr
->erase_buf
= kmalloc(part
->master
->erasesize
, GFP_ATOMIC
);
211 if (!instr
->erase_buf
)
/* Misalignment of the erase start within its master erase block;
 * do_div() reduces mtd_ofs in place and returns the remainder. */
214 mtd_ofs
= part
->offset
+ instr
->addr
;
215 instr
->erase_buf_ofs
= do_div(mtd_ofs
, part
->master
->erasesize
);
/* Unaligned start: widen the erase to the block start and snapshot
 * the whole block so the leading bytes can be rewritten later. */
217 if (instr
->erase_buf_ofs
> 0) {
218 instr
->addr
-= instr
->erase_buf_ofs
;
219 ret
= part
->master
->read(part
->master
,
220 instr
->addr
+ part
->offset
,
221 part
->master
->erasesize
,
222 &readlen
, instr
->erase_buf
);
224 instr
->partial_start
= true;
/* Else branch: check the partition END for misalignment instead. */
226 mtd_ofs
= part
->offset
+ part
->mtd
.size
;
227 instr
->erase_buf_ofs
= part
->master
->erasesize
-
228 do_div(mtd_ofs
, part
->master
->erasesize
);
/* Unaligned end: widen the erase to the block end and snapshot the
 * last block so the trailing bytes can be rewritten later. */
230 if (instr
->erase_buf_ofs
> 0) {
231 instr
->len
+= instr
->erase_buf_ofs
;
232 ret
= part
->master
->read(part
->master
,
233 part
->offset
+ instr
->addr
+
234 instr
->len
- part
->master
->erasesize
,
235 part
->master
->erasesize
, &readlen
,
/* Error path: free the scratch buffer if the pre-read failed. */
242 kfree(instr
->erase_buf
);
/* Translate to an absolute master address and perform the erase. */
248 instr
->addr
+= part
->offset
;
249 ret
= part
->master
->erase(part
->master
, instr
);
/* On synchronous failure, translate addresses back to partition-
 * relative values for the caller (0xffffffff == no failure address). */
251 if (instr
->fail_addr
!= 0xffffffff)
252 instr
->fail_addr
-= part
->offset
;
253 instr
->addr
-= part
->offset
;
254 if (mtd
->flags
& MTD_ERASE_PARTIAL
)
255 kfree(instr
->erase_buf
);
/* mtd_erase_callback: erase-completion hook.  When the erased device is a
 * partition slave (its erase method is part_erase), this restores the
 * bytes outside a partial-block partition from instr->erase_buf, frees
 * that buffer, and translates addr/fail_addr back to partition-relative
 * values.  Finally it invokes the caller's original callback, if any.
 *
 * NOTE(review): interior lines are missing from this view: braces, the
 * 'wrlen' declaration, a write argument (the tail source pointer into
 * erase_buf), and the 'if (instr->callback)' guard before line 289. */
261 void mtd_erase_callback(struct erase_info
*instr
)
263 if (instr
->mtd
->erase
== part_erase
) {
264 struct mtd_part
*part
= PART(instr
->mtd
);
267 if (instr
->mtd
->flags
& MTD_ERASE_PARTIAL
) {
/* Partial start: rewrite the bytes that preceded the partition
 * inside the first (widened) erase block, then shrink the range
 * back to what the caller actually asked for. */
268 if (instr
->partial_start
) {
269 part
->master
->write(part
->master
,
270 instr
->addr
, instr
->erase_buf_ofs
,
271 &wrlen
, instr
->erase_buf
);
272 instr
->addr
+= instr
->erase_buf_ofs
;
/* Partial end (else branch): rewrite the bytes that followed the
 * partition inside the last (widened) erase block. */
274 instr
->len
-= instr
->erase_buf_ofs
;
275 part
->master
->write(part
->master
,
276 instr
->addr
+ instr
->len
,
277 instr
->erase_buf_ofs
, &wrlen
,
279 part
->master
->erasesize
-
280 instr
->erase_buf_ofs
);
282 kfree(instr
->erase_buf
);
/* Translate master-absolute addresses back to partition-relative. */
284 if (instr
->fail_addr
!= 0xffffffff)
285 instr
->fail_addr
-= part
->offset
;
286 instr
->addr
-= part
->offset
;
/* Chain to the original completion callback supplied by the caller. */
289 instr
->callback(instr
);
291 EXPORT_SYMBOL_GPL(mtd_erase_callback
);
/* part_lock: write-protect a range; bounds-checks against the partition
 * and delegates to the master at the translated offset.
 * NOTE(review): braces and the bounds-check error return are missing
 * from this view. */
293 static int part_lock (struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
295 struct mtd_part
*part
= PART(mtd
);
/* Range must not extend past the end of the partition. */
296 if ((len
+ ofs
) > mtd
->size
)
298 return part
->master
->lock(part
->master
, ofs
+ part
->offset
, len
);
/* part_unlock: remove write-protection from a range; mirror image of
 * part_lock above.
 * NOTE(review): braces and the bounds-check error return are missing
 * from this view. */
301 static int part_unlock (struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
303 struct mtd_part
*part
= PART(mtd
);
/* Range must not extend past the end of the partition. */
304 if ((len
+ ofs
) > mtd
->size
)
306 return part
->master
->unlock(part
->master
, ofs
+ part
->offset
, len
);
/* part_sync: flush the underlying master device; pure pass-through. */
309 static void part_sync(struct mtd_info
*mtd
)
311 struct mtd_part
*part
= PART(mtd
);
312 part
->master
->sync(part
->master
);
/* part_suspend: suspend the master device (installed only on the first
 * partition -- see add_mtd_partitions); pure pass-through. */
315 static int part_suspend(struct mtd_info
*mtd
)
317 struct mtd_part
*part
= PART(mtd
);
318 return part
->master
->suspend(part
->master
);
/* part_resume: resume the master device after suspend; pure pass-through. */
321 static void part_resume(struct mtd_info
*mtd
)
323 struct mtd_part
*part
= PART(mtd
);
324 part
->master
->resume(part
->master
);
/* part_block_isbad: query the bad-block state of the block containing
 * 'ofs', translated into the master's address space.
 * NOTE(review): braces and the bounds-check error return are missing
 * from this view; the offset translation on line 333 appears on a
 * dropped continuation line in the original. */
327 static int part_block_isbad (struct mtd_info
*mtd
, loff_t ofs
)
329 struct mtd_part
*part
= PART(mtd
);
/* Offset must lie inside the partition. */
330 if (ofs
>= mtd
->size
)
333 return part
->master
->block_isbad(part
->master
, ofs
);
/* part_block_markbad: mark the block containing 'ofs' bad on the master
 * and, on success, bump this partition's bad-block statistics.
 * NOTE(review): braces, the 'res' declaration, error returns, the offset
 * translation, and the success check before line 348 are missing from
 * this view. */
336 static int part_block_markbad (struct mtd_info
*mtd
, loff_t ofs
)
338 struct mtd_part
*part
= PART(mtd
);
/* Read-only partitions cannot mark blocks bad. */
341 if (!(mtd
->flags
& MTD_WRITEABLE
))
/* Offset must lie inside the partition. */
343 if (ofs
>= mtd
->size
)
346 res
= part
->master
->block_markbad(part
->master
, ofs
);
/* Account the newly marked bad block against this partition. */
348 mtd
->ecc_stats
.badblocks
++;
353 * This function unregisters and destroy all slave MTD objects which are
354 * attached to the given master MTD object.
/* Walks the global mtd_partitions list with the _safe iterator (entries
 * are deleted during the walk), unregisters each slave that was
 * registered as an MTD device, and removes it from the list.
 * NOTE(review): the kfree(slave) and return lines are missing from this
 * view. */
357 int del_mtd_partitions(struct mtd_info
*master
)
359 struct mtd_part
*slave
, *next
;
361 list_for_each_entry_safe(slave
, next
, &mtd_partitions
, list
)
362 if (slave
->master
== master
) {
363 list_del(&slave
->list
);
/* Only slaves registered via add_mtd_device() are deregistered;
 * slaves handed back through parts[i].mtdp were never registered. */
364 if(slave
->registered
)
365 del_mtd_device(&slave
->mtd
);
373 * This function, given a master MTD object and a partition table, creates
374 * and registers slave MTD objects which are bound to the master according to
375 * the partition definitions.
376 * (Q: should we register the master MTD object as well?)
/* For each entry in parts[0..nbparts): allocate a slave, copy geometry
 * from the master, install the part_* method wrappers (only when the
 * master implements the corresponding method), resolve special offsets
 * (OFS_APPEND / OFS_NXTBLK), sanity-check and clamp the range, pick an
 * erasesize (largest overlapping region for variable-erase chips),
 * handle partial-erase-block partitions, count pre-existing bad blocks,
 * and finally register the slave (or hand it back via parts[i].mtdp).
 *
 * NOTE(review): many interior lines are missing from this view: braces,
 * the 'i'/'offs'/'ret' declarations, the nbparts parameter line, the
 * allocation-failure return, MTD_WRITEABLE re-checks, and several printk
 * argument lines.  Reconstruct from the pristine mtdpart.c. */
379 int add_mtd_partitions(struct mtd_info
*master
,
380 const struct mtd_partition
*parts
,
383 struct mtd_part
*slave
;
384 u_int32_t cur_offset
= 0;
387 printk (KERN_NOTICE
"Creating %d MTD partitions on \"%s\":\n", nbparts
, master
->name
);
389 for (i
= 0; i
< nbparts
; i
++) {
391 /* allocate the partition structure */
392 slave
= kzalloc (sizeof(*slave
), GFP_KERNEL
);
/* Allocation failure: report, tear down partitions built so far. */
394 printk ("memory allocation error while creating partitions for \"%s\"\n",
396 del_mtd_partitions(master
);
399 list_add(&slave
->list
, &mtd_partitions
);
401 /* set up the MTD object for this partition */
402 slave
->mtd
.type
= master
->type
;
/* mask_flags lets a partition drop capabilities (e.g. writability). */
403 slave
->mtd
.flags
= master
->flags
& ~parts
[i
].mask_flags
;
404 slave
->mtd
.size
= parts
[i
].size
;
405 slave
->mtd
.writesize
= master
->writesize
;
406 slave
->mtd
.oobsize
= master
->oobsize
;
407 slave
->mtd
.oobavail
= master
->oobavail
;
408 slave
->mtd
.subpage_sft
= master
->subpage_sft
;
410 slave
->mtd
.name
= parts
[i
].name
;
411 slave
->mtd
.owner
= master
->owner
;
413 slave
->mtd
.read
= part_read
;
414 slave
->mtd
.write
= part_write
;
/* Optional methods: install wrappers only if the master provides
 * the underlying operation (point/unpoint come as a pair). */
416 if(master
->point
&& master
->unpoint
){
417 slave
->mtd
.point
= part_point
;
418 slave
->mtd
.unpoint
= part_unpoint
;
421 if (master
->read_oob
)
422 slave
->mtd
.read_oob
= part_read_oob
;
423 if (master
->write_oob
)
424 slave
->mtd
.write_oob
= part_write_oob
;
425 if(master
->read_user_prot_reg
)
426 slave
->mtd
.read_user_prot_reg
= part_read_user_prot_reg
;
427 if(master
->read_fact_prot_reg
)
428 slave
->mtd
.read_fact_prot_reg
= part_read_fact_prot_reg
;
429 if(master
->write_user_prot_reg
)
430 slave
->mtd
.write_user_prot_reg
= part_write_user_prot_reg
;
431 if(master
->lock_user_prot_reg
)
432 slave
->mtd
.lock_user_prot_reg
= part_lock_user_prot_reg
;
433 if(master
->get_user_prot_info
)
434 slave
->mtd
.get_user_prot_info
= part_get_user_prot_info
;
435 if(master
->get_fact_prot_info
)
436 slave
->mtd
.get_fact_prot_info
= part_get_fact_prot_info
;
438 slave
->mtd
.sync
= part_sync
;
/* suspend/resume only on the first partition, so the master is
 * suspended exactly once rather than once per partition. */
439 if (!i
&& master
->suspend
&& master
->resume
) {
440 slave
->mtd
.suspend
= part_suspend
;
441 slave
->mtd
.resume
= part_resume
;
444 slave
->mtd
.writev
= part_writev
;
446 slave
->mtd
.lock
= part_lock
;
448 slave
->mtd
.unlock
= part_unlock
;
449 if (master
->block_isbad
)
450 slave
->mtd
.block_isbad
= part_block_isbad
;
451 if (master
->block_markbad
)
452 slave
->mtd
.block_markbad
= part_block_markbad
;
453 slave
->mtd
.erase
= part_erase
;
454 slave
->master
= master
;
455 slave
->offset
= parts
[i
].offset
;
/* Resolve special offsets: APPEND = follow the previous partition;
 * NXTBLK = same, but rounded up to the next erase-block boundary. */
458 if (slave
->offset
== MTDPART_OFS_APPEND
)
459 slave
->offset
= cur_offset
;
460 if (slave
->offset
== MTDPART_OFS_NXTBLK
) {
461 slave
->offset
= cur_offset
;
462 if ((cur_offset
% master
->erasesize
) != 0) {
463 /* Round up to next erasesize */
464 slave
->offset
= ((cur_offset
/ master
->erasesize
) + 1) * master
->erasesize
;
465 printk(KERN_NOTICE
"Moving partition %d: "
466 "0x%08x -> 0x%08x\n", i
,
467 cur_offset
, slave
->offset
);
/* SIZ_FULL = everything from the offset to the end of the device. */
470 if (slave
->mtd
.size
== MTDPART_SIZ_FULL
)
471 slave
->mtd
.size
= master
->size
- slave
->offset
;
472 cur_offset
= slave
->offset
+ slave
->mtd
.size
;
474 printk (KERN_NOTICE
"0x%08x-0x%08x : \"%s\"\n", slave
->offset
,
475 slave
->offset
+ slave
->mtd
.size
, slave
->mtd
.name
);
477 /* let's do some sanity checks */
478 if (slave
->offset
>= master
->size
) {
479 /* let's register it anyway to preserve ordering */
482 printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
/* Truncate partitions that spill past the end of the master. */
485 if (slave
->offset
+ slave
->mtd
.size
> master
->size
) {
486 slave
->mtd
.size
= master
->size
- slave
->offset
;
487 printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
488 parts
[i
].name
, master
->name
, slave
->mtd
.size
);
490 if (master
->numeraseregions
>1) {
491 /* Deal with variable erase size stuff */
493 struct mtd_erase_region_info
*regions
= master
->eraseregions
;
495 /* Find the first erase regions which is part of this partition. */
/* NOTE(review): this loop reuses 'i', the outer partition index --
 * present in the pristine source too; verify against upstream. */
496 for (i
=0; i
< master
->numeraseregions
&& slave
->offset
>= regions
[i
].offset
; i
++)
/* Pick the largest erasesize among all regions the slave overlaps. */
499 for (i
--; i
< master
->numeraseregions
&& slave
->offset
+ slave
->mtd
.size
> regions
[i
].offset
; i
++) {
500 if (slave
->mtd
.erasesize
< regions
[i
].erasesize
) {
501 slave
->mtd
.erasesize
= regions
[i
].erasesize
;
505 /* Single erase size */
506 slave
->mtd
.erasesize
= master
->erasesize
;
/* Partition boundaries not aligned to the erase block: mark it
 * ERASE_PARTIAL; if it spans more than one block it must become
 * read-only, otherwise the whole partition is one "erase block". */
509 if ((slave
->mtd
.flags
& MTD_WRITEABLE
) &&
510 ((slave
->offset
% slave
->mtd
.erasesize
) ||
511 ((slave
->offset
+ slave
->mtd
.size
) % slave
->mtd
.erasesize
))) {
512 /* Doesn't start or end on a boundary of major erase size */
513 slave
->mtd
.flags
|= MTD_ERASE_PARTIAL
;
515 if (((u32
) slave
->mtd
.size
) > master
->erasesize
)
516 slave
->mtd
.flags
&= ~MTD_WRITEABLE
;
518 slave
->mtd
.erasesize
= slave
->mtd
.size
;
520 if ((slave
->mtd
.flags
& (MTD_ERASE_PARTIAL
|MTD_WRITEABLE
)) == MTD_ERASE_PARTIAL
)
521 printk(KERN_WARNING
"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
524 slave
->mtd
.ecclayout
= master
->ecclayout
;
/* Pre-count bad blocks inside this partition so ecc_stats starts
 * out accurate. */
525 if (master
->block_isbad
) {
528 while(offs
< slave
->mtd
.size
) {
529 if (master
->block_isbad(master
,
530 offs
+ slave
->offset
))
531 slave
->mtd
.ecc_stats
.badblocks
++;
532 offs
+= slave
->mtd
.erasesize
;
537 { /* store the object pointer (caller may or may not register it */
538 *parts
[i
].mtdp
= &slave
->mtd
;
539 slave
->registered
= 0;
543 /* register our partition */
544 add_mtd_device(&slave
->mtd
);
545 slave
->registered
= 1;
552 EXPORT_SYMBOL(add_mtd_partitions
);
553 EXPORT_SYMBOL(del_mtd_partitions
);
/* Registry of partition-table parsers (cmdline, RedBoot, ...), protected
 * by part_parser_lock. */
555 static DEFINE_SPINLOCK(part_parser_lock
);
556 static LIST_HEAD(part_parsers
);
/* get_partition_parser: look up a registered parser by name, taking a
 * module reference on it (try_module_get) so it cannot be unloaded while
 * in use.  Returns NULL when no parser matches.
 * NOTE(review): braces, the loop-body 'ret = p; break;' lines, and the
 * final return are missing from this view. */
558 static struct mtd_part_parser
*get_partition_parser(const char *name
)
560 struct mtd_part_parser
*p
, *ret
= NULL
;
562 spin_lock(&part_parser_lock
);
564 list_for_each_entry(p
, &part_parsers
, list
)
/* Match by name AND successfully pin the owning module. */
565 if (!strcmp(p
->name
, name
) && try_module_get(p
->owner
)) {
570 spin_unlock(&part_parser_lock
);
/* register_mtd_parser: add a parser to the global registry under the
 * spinlock.  NOTE(review): braces and the return line are missing from
 * this view. */
575 int register_mtd_parser(struct mtd_part_parser
*p
)
577 spin_lock(&part_parser_lock
);
578 list_add(&p
->list
, &part_parsers
);
579 spin_unlock(&part_parser_lock
);
/* deregister_mtd_parser: remove a parser from the registry under the
 * spinlock.  NOTE(review): the list_del line between lock and unlock is
 * missing from this view, as are braces and the return. */
584 int deregister_mtd_parser(struct mtd_part_parser
*p
)
586 spin_lock(&part_parser_lock
);
588 spin_unlock(&part_parser_lock
);
/* parse_mtd_partitions: try each parser named in the NULL-terminated
 * 'types' array in turn until one returns a positive partition count.
 * Parsers not yet registered are request_module()'d by name and looked
 * up again.  On success *pparts receives the parsed table and the count
 * is returned.
 * NOTE(review): braces, the 'ret' declaration/initialization, the
 * !parser continue path, and the final return are missing from this
 * view. */
592 int parse_mtd_partitions(struct mtd_info
*master
, const char **types
,
593 struct mtd_partition
**pparts
, unsigned long origin
)
595 struct mtd_part_parser
*parser
;
/* Keep trying types until a parser reports partitions (ret > 0). */
598 for ( ; ret
<= 0 && *types
; types
++) {
599 parser
= get_partition_parser(*types
);
/* Not registered yet: attempt to load the module of the same name. */
601 if (!parser
&& !request_module("%s", *types
))
602 parser
= get_partition_parser(*types
);
605 printk(KERN_NOTICE
"%s partition parsing not available\n",
609 ret
= (*parser
->parse_fn
)(master
, pparts
, origin
);
611 printk(KERN_NOTICE
"%d %s partitions found on MTD device %s\n",
612 ret
, parser
->name
, master
->name
);
/* Drop the module reference taken by get_partition_parser(). */
614 put_partition_parser(parser
);
619 EXPORT_SYMBOL_GPL(parse_mtd_partitions
);
620 EXPORT_SYMBOL_GPL(register_mtd_parser
);
621 EXPORT_SYMBOL_GPL(deregister_mtd_parser
);