/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 */
8 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/sched.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/compatmac.h>
20 #include <asm/uaccess.h>
/*
 * Sysfs device class under which the mtd<N> / mtd<N>ro character-device
 * nodes are created (see mtd_notify_add()); set up in init_mtdchar().
 */
static struct class *mtd_class;
24 static void mtd_notify_add(struct mtd_info
* mtd
)
29 class_device_create(mtd_class
, NULL
, MKDEV(MTD_CHAR_MAJOR
, mtd
->index
*2),
30 NULL
, "mtd%d", mtd
->index
);
32 class_device_create(mtd_class
, NULL
,
33 MKDEV(MTD_CHAR_MAJOR
, mtd
->index
*2+1),
34 NULL
, "mtd%dro", mtd
->index
);
37 static void mtd_notify_remove(struct mtd_info
* mtd
)
42 class_device_destroy(mtd_class
, MKDEV(MTD_CHAR_MAJOR
, mtd
->index
*2));
43 class_device_destroy(mtd_class
, MKDEV(MTD_CHAR_MAJOR
, mtd
->index
*2+1));
46 static struct mtd_notifier notifier
= {
47 .add
= mtd_notify_add
,
48 .remove
= mtd_notify_remove
,
52 * Data structure to hold the pointer to the mtd device as well
53 * as mode information ofr various use cases.
55 struct mtd_file_info
{
57 enum mtd_file_modes mode
;
60 static loff_t
mtd_lseek (struct file
*file
, loff_t offset
, int orig
)
62 struct mtd_file_info
*mfi
= file
->private_data
;
63 struct mtd_info
*mtd
= mfi
->mtd
;
69 offset
+= file
->f_pos
;
78 if (offset
>= 0 && offset
<= mtd
->size
)
79 return file
->f_pos
= offset
;
86 static int mtd_open(struct inode
*inode
, struct file
*file
)
88 int minor
= iminor(inode
);
89 int devnum
= minor
>> 1;
91 struct mtd_file_info
*mfi
;
93 DEBUG(MTD_DEBUG_LEVEL0
, "MTD_open\n");
95 if (devnum
>= MAX_MTD_DEVICES
)
98 /* You can't open the RO devices RW */
99 if ((file
->f_mode
& 2) && (minor
& 1))
102 mtd
= get_mtd_device(NULL
, devnum
);
107 if (MTD_ABSENT
== mtd
->type
) {
112 /* You can't open it RW if it's not a writeable device */
113 if ((file
->f_mode
& 2) && !(mtd
->flags
& MTD_WRITEABLE
)) {
118 mfi
= kzalloc(sizeof(*mfi
), GFP_KERNEL
);
124 file
->private_data
= mfi
;
/*====================================================================*/
131 static int mtd_close(struct inode
*inode
, struct file
*file
)
133 struct mtd_file_info
*mfi
= file
->private_data
;
134 struct mtd_info
*mtd
= mfi
->mtd
;
136 DEBUG(MTD_DEBUG_LEVEL0
, "MTD_close\n");
142 file
->private_data
= NULL
;
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
/*
 * mtd_read(): read() handler for /dev/mtd<N> and /dev/mtd<N>ro.
 *
 * NOTE(review): this file is a damaged extraction. The integer fused
 * onto the start of some lines is the ORIGINAL file's line number;
 * gaps in that numbering mark lines missing from this copy (function
 * braces, the transfer while-loop, the switch (mfi->mode) head, error
 * returns, kfree of the bounce buffer, and the final return). Code
 * text below is byte-identical to the extraction; only comments added.
 */
166 static ssize_t
mtd_read(struct file
*file
, char __user
*buf
, size_t count
,loff_t
*ppos
)
/* Per-open state carries the MTD device and the file's access mode. */
168 struct mtd_file_info
*mfi
= file
->private_data
;
169 struct mtd_info
*mtd
= mfi
->mtd
;
/* Accumulates bytes actually transferred; returned to userspace. */
171 size_t total_retlen
=0;
177 DEBUG(MTD_DEBUG_LEVEL0
,"MTD_read\n");
/* Clamp the request so it cannot read past the end of the device. */
179 if (*ppos
+ count
> mtd
->size
)
180 count
= mtd
->size
- *ppos
;
/* Kernel bounce buffer; mtd_kmalloc_up_to() picks a size it can
 * actually allocate and stores it in 'size' (helper defined elsewhere). */
185 kbuf
= mtd_kmalloc_up_to(mtd
, &size
);
/* Per-iteration transfer length: no more than the bounce buffer holds. */
190 len
= min_t(size_t, count
, size
);
/* Presumably inside switch (mfi->mode) — the switch head is missing. */
193 case MTD_MODE_OTP_FACTORY
:
194 ret
= mtd
->read_fact_prot_reg(mtd
, *ppos
, len
, &retlen
, kbuf
);
196 case MTD_MODE_OTP_USER
:
197 ret
= mtd
->read_user_prot_reg(mtd
, *ppos
, len
, &retlen
, kbuf
);
/* Presumably the raw-mode case: data+OOB read via read_oob(). */
201 struct mtd_oob_ops ops
;
203 ops
.mode
= MTD_OOB_RAW
;
208 ret
= mtd
->read_oob(mtd
, *ppos
, &ops
);
/* Default path: plain mtd->read() into the bounce buffer. */
213 ret
= mtd
->read(mtd
, *ppos
, len
, &retlen
, kbuf
);
215 /* Nand returns -EBADMSG on ecc errors, but it returns
216 * the data. For our userspace tools it is important
217 * to dump areas with ecc errors !
218 * For kernel internal usage it also might return -EUCLEAN
219 * to signal the caller that a bitflip has occured and has
220 * been corrected by the ECC algorithm.
221 * Userspace software which accesses NAND this way
222 * must be aware of the fact that it deals with NAND
/* Copy out even on ECC-error results (see the note above). */
224 if (!ret
|| (ret
== -EUCLEAN
) || (ret
== -EBADMSG
)) {
226 if (copy_to_user(buf
, kbuf
, retlen
)) {
/* Running total becomes the return value to userspace. */
231 total_retlen
+= retlen
;
/*
 * mtd_write(): write() handler for /dev/mtd<N>.
 *
 * NOTE(review): damaged extraction — the integers fused onto line
 * starts are the original file's line numbers, and gaps in them mark
 * lines missing from this copy (braces, the transfer while-loop, the
 * switch (mfi->mode) head, case bodies, error returns, kfree, and the
 * final return). Code text is byte-identical; comments only added.
 */
249 static ssize_t
mtd_write(struct file
*file
, const char __user
*buf
, size_t count
,loff_t
*ppos
)
251 struct mtd_file_info
*mfi
= file
->private_data
;
252 struct mtd_info
*mtd
= mfi
->mtd
;
/* Accumulates bytes actually written; returned to userspace. */
256 size_t total_retlen
=0;
260 DEBUG(MTD_DEBUG_LEVEL0
,"MTD_write\n");
/* Writing at exactly end-of-device: branch body missing here;
 * upstream returns -ENOSPC — confirm against VCS. */
262 if (*ppos
== mtd
->size
)
/* Clamp the request to the device size. */
265 if (*ppos
+ count
> mtd
->size
)
266 count
= mtd
->size
- *ppos
;
/* Kernel bounce buffer sized by mtd_kmalloc_up_to() (writes 'size'). */
271 kbuf
= mtd_kmalloc_up_to(mtd
, &size
);
276 len
= min_t(size_t, count
, size
);
/* Stage user data into the kernel buffer before touching the chip. */
278 if (copy_from_user(kbuf
, buf
, len
)) {
/* Presumably inside switch (mfi->mode); the factory-OTP case body is
 * missing — factory area is read-only, so it presumably rejects the
 * write. Confirm against VCS. */
284 case MTD_MODE_OTP_FACTORY
:
287 case MTD_MODE_OTP_USER
:
288 if (!mtd
->write_user_prot_reg
) {
292 ret
= mtd
->write_user_prot_reg(mtd
, *ppos
, len
, &retlen
, kbuf
);
/* Presumably the raw-mode case: data+OOB write via write_oob(). */
297 struct mtd_oob_ops ops
;
299 ops
.mode
= MTD_OOB_RAW
;
304 ret
= mtd
->write_oob(mtd
, *ppos
, &ops
);
/* Default path: plain mtd->write() from the bounce buffer. */
310 ret
= (*(mtd
->write
))(mtd
, *ppos
, len
, &retlen
, kbuf
);
314 total_retlen
+= retlen
;
/*======================================================================

    IOCTL calls for getting device parameters.

 ======================================================================*/
333 static void mtdchar_erase_callback (struct erase_info
*instr
)
335 wake_up((wait_queue_head_t
*)instr
->priv
);
#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
/*
 * Switch the per-open-file OTP mode (OTPSELECT / MTDFILEMODE helper).
 * Returns 0 on success, -EOPNOTSUPP if the device lacks the needed OTP
 * hook, -EINVAL for an unknown mode.
 *
 * NOTE(review): the switch skeleton was missing from this extraction
 * and is reconstructed — verify against VCS. Fix applied: the
 * extracted original tested read_fact_prot_reg in BOTH cases; the
 * user-OTP case must probe read_user_prot_reg instead.
 */
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			return -EOPNOTSUPP;
		mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		/* Was read_fact_prot_reg — wrong hook for the user area. */
		if (!mtd->read_user_prot_reg)
			return -EOPNOTSUPP;
		mfi->mode = MTD_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif
/*
 * mtd_ioctl(): ioctl() handler for the mtd character devices.
 *
 * NOTE(review): damaged extraction. The integer fused onto line starts
 * is the ORIGINAL file's line number; gaps in that numbering mark
 * lines missing from this copy — notably the switch (cmd) head, many
 * 'case MEM*' labels, break/return statements, kmalloc/kzalloc failure
 * paths, and local declarations ('ret', 'size', 'offs', 'mode').
 * Code text is byte-identical to the extraction; comments only added,
 * with 'presumably' marking case labels inferred from the bodies.
 */
368 static int mtd_ioctl(struct inode
*inode
, struct file
*file
,
369 u_int cmd
, u_long arg
)
371 struct mtd_file_info
*mfi
= file
->private_data
;
372 struct mtd_info
*mtd
= mfi
->mtd
;
373 void __user
*argp
= (void __user
*)arg
;
376 struct mtd_info_user info
;
378 DEBUG(MTD_DEBUG_LEVEL0
, "MTD_ioctl\n");
/* Every MTD ioctl encodes its argument size in the command word;
 * pre-validate the user pointer for the implied direction(s). */
380 size
= (cmd
& IOCSIZE_MASK
) >> IOCSIZE_SHIFT
;
382 if (!access_ok(VERIFY_READ
, argp
, size
))
386 if (!access_ok(VERIFY_WRITE
, argp
, size
))
/* MEMGETREGIONCOUNT: number of variable erase regions. */
391 case MEMGETREGIONCOUNT
:
392 if (copy_to_user(argp
, &(mtd
->numeraseregions
), sizeof(int)))
/* MEMGETREGIONINFO: copy out one erase-region descriptor. */
396 case MEMGETREGIONINFO
:
398 struct region_info_user ur
;
400 if (copy_from_user(&ur
, argp
, sizeof(struct region_info_user
)))
403 if (ur
.regionindex
>= mtd
->numeraseregions
)
405 if (copy_to_user(argp
, &(mtd
->eraseregions
[ur
.regionindex
]),
406 sizeof(struct mtd_erase_region_info
)))
/* Presumably MEMGETINFO (label missing): fill struct mtd_info_user. */
412 info
.type
= mtd
->type
;
413 info
.flags
= mtd
->flags
;
414 info
.size
= mtd
->size
;
415 info
.erasesize
= mtd
->erasesize
;
416 info
.writesize
= mtd
->writesize
;
417 info
.oobsize
= mtd
->oobsize
;
418 /* The below fields are obsolete */
421 if (copy_to_user(argp
, &info
, sizeof(struct mtd_info_user
)))
/* Presumably MEMERASE: synchronous erase — the caller sleeps until
 * mtdchar_erase_callback() fires. Requires a writable open. */
427 struct erase_info
*erase
;
429 if(!(file
->f_mode
& 2))
432 erase
=kzalloc(sizeof(struct erase_info
),GFP_KERNEL
);
/* Wait-queue head lives on the stack — see the FIXME below. */
436 wait_queue_head_t waitq
;
437 DECLARE_WAITQUEUE(wait
, current
);
439 init_waitqueue_head(&waitq
);
441 if (copy_from_user(&erase
->addr
, argp
,
442 sizeof(struct erase_info_user
))) {
447 erase
->callback
= mtdchar_erase_callback
;
448 erase
->priv
= (unsigned long)&waitq
;
451 FIXME: Allow INTERRUPTIBLE. Which means
452 not having the wait_queue head on the stack.
454 If the wq_head is on the stack, and we
455 leave because we got interrupted, then the
456 wq_head is no longer there when the
457 callback routine tries to wake us up.
459 ret
= mtd
->erase(mtd
, erase
);
/* Classic sleep pattern: set task state before testing the condition
 * so a wake-up between the check and schedule() is not lost. */
461 set_current_state(TASK_UNINTERRUPTIBLE
);
462 add_wait_queue(&waitq
, &wait
);
463 if (erase
->state
!= MTD_ERASE_DONE
&&
464 erase
->state
!= MTD_ERASE_FAILED
)
466 remove_wait_queue(&waitq
, &wait
);
467 set_current_state(TASK_RUNNING
);
469 ret
= (erase
->state
== MTD_ERASE_FAILED
)?-EIO
:0;
/* Presumably MEMWRITEOOB: OOB write, capped at 4096 bytes per call. */
478 struct mtd_oob_buf buf
;
479 struct mtd_oob_ops ops
;
481 if(!(file
->f_mode
& 2))
484 if (copy_from_user(&buf
, argp
, sizeof(struct mtd_oob_buf
)))
487 if (buf
.length
> 4096)
/* NOTE(review): positive 'EFAULT' here vs '-EFAULT' in the read-OOB
 * path below — looks like a sign bug (or extraction loss); confirm. */
493 ret
= access_ok(VERIFY_READ
, buf
.ptr
,
494 buf
.length
) ? 0 : EFAULT
;
499 ops
.ooblen
= buf
.length
;
/* Split the user offset into a page-aligned address plus an offset
 * within the OOB area (masking assumes oobsize is a power of two). */
500 ops
.ooboffs
= buf
.start
& (mtd
->oobsize
- 1);
502 ops
.mode
= MTD_OOB_PLACE
;
504 if (ops
.ooboffs
&& ops
.ooblen
> (mtd
->oobsize
- ops
.ooboffs
))
507 ops
.oobbuf
= kmalloc(buf
.length
, GFP_KERNEL
);
511 if (copy_from_user(ops
.oobbuf
, buf
.ptr
, buf
.length
)) {
516 buf
.start
&= ~(mtd
->oobsize
- 1);
517 ret
= mtd
->write_oob(mtd
, buf
.start
, &ops
);
/* Report the transferred OOB byte count back into the user struct. */
519 if (copy_to_user(argp
+ sizeof(uint32_t), &ops
.oobretlen
,
/* Presumably MEMREADOOB: mirror of the write-OOB path above. */
530 struct mtd_oob_buf buf
;
531 struct mtd_oob_ops ops
;
533 if (copy_from_user(&buf
, argp
, sizeof(struct mtd_oob_buf
)))
536 if (buf
.length
> 4096)
542 ret
= access_ok(VERIFY_WRITE
, buf
.ptr
,
543 buf
.length
) ? 0 : -EFAULT
;
547 ops
.ooblen
= buf
.length
;
548 ops
.ooboffs
= buf
.start
& (mtd
->oobsize
- 1);
550 ops
.mode
= MTD_OOB_PLACE
;
552 if (ops
.ooboffs
&& ops
.ooblen
> (mtd
->oobsize
- ops
.ooboffs
))
555 ops
.oobbuf
= kmalloc(buf
.length
, GFP_KERNEL
);
559 buf
.start
&= ~(mtd
->oobsize
- 1);
560 ret
= mtd
->read_oob(mtd
, buf
.start
, &ops
);
562 if (put_user(ops
.oobretlen
, (uint32_t __user
*)argp
))
564 else if (ops
.oobretlen
&& copy_to_user(buf
.ptr
, ops
.oobbuf
,
/* Presumably MEMLOCK: lock a range against writes (if supported). */
574 struct erase_info_user info
;
576 if (copy_from_user(&info
, argp
, sizeof(info
)))
582 ret
= mtd
->lock(mtd
, info
.start
, info
.length
);
/* Presumably MEMUNLOCK. */
588 struct erase_info_user info
;
590 if (copy_from_user(&info
, argp
, sizeof(info
)))
596 ret
= mtd
->unlock(mtd
, info
.start
, info
.length
);
600 /* Legacy interface */
/* Presumably MEMGETOOBSEL: translate ecclayout into the legacy
 * struct nand_oobinfo for old userspace tools. */
603 struct nand_oobinfo oi
;
607 if (mtd
->ecclayout
->eccbytes
> ARRAY_SIZE(oi
.eccpos
))
610 oi
.useecc
= MTD_NANDECC_AUTOPLACE
;
611 memcpy(&oi
.eccpos
, mtd
->ecclayout
->eccpos
, sizeof(oi
.eccpos
));
612 memcpy(&oi
.oobfree
, mtd
->ecclayout
->oobfree
,
614 oi
.eccbytes
= mtd
->ecclayout
->eccbytes
;
616 if (copy_to_user(argp
, &oi
, sizeof(struct nand_oobinfo
)))
/* Presumably MEMGETBADBLOCK ('offs' declared on a missing line). */
625 if (copy_from_user(&offs
, argp
, sizeof(loff_t
)))
627 if (!mtd
->block_isbad
)
630 return mtd
->block_isbad(mtd
, offs
);
/* Presumably MEMSETBADBLOCK. */
638 if (copy_from_user(&offs
, argp
, sizeof(loff_t
)))
640 if (!mtd
->block_markbad
)
643 return mtd
->block_markbad(mtd
, offs
);
/* OTP ioctls, compiled only with OTP support. Presumably OTPSELECT:
 * reset to normal mode, then switch via otp_select_filemode(). */
647 #if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
651 if (copy_from_user(&mode
, argp
, sizeof(int)))
654 mfi
->mode
= MTD_MODE_NORMAL
;
656 ret
= otp_select_filemode(mfi
, mode
);
662 case OTPGETREGIONCOUNT
:
663 case OTPGETREGIONINFO
:
/* Query whichever OTP area the file is currently switched to. */
665 struct otp_info
*buf
= kmalloc(4096, GFP_KERNEL
);
670 case MTD_MODE_OTP_FACTORY
:
671 if (mtd
->get_fact_prot_info
)
672 ret
= mtd
->get_fact_prot_info(mtd
, buf
, 4096);
674 case MTD_MODE_OTP_USER
:
675 if (mtd
->get_user_prot_info
)
676 ret
= mtd
->get_user_prot_info(mtd
, buf
, 4096);
682 if (cmd
== OTPGETREGIONCOUNT
) {
683 int nbr
= ret
/ sizeof(struct otp_info
);
684 ret
= copy_to_user(argp
, &nbr
, sizeof(int));
686 ret
= copy_to_user(argp
, buf
, ret
);
/* Presumably OTPLOCK: only legal after OTPSELECT'ing user mode. */
696 struct otp_info info
;
698 if (mfi
->mode
!= MTD_MODE_OTP_USER
)
700 if (copy_from_user(&info
, argp
, sizeof(info
)))
702 if (!mtd
->lock_user_prot_reg
)
704 ret
= mtd
->lock_user_prot_reg(mtd
, info
.start
, info
.length
);
/* Presumably ECCGETLAYOUT: raw ecclayout copy-out. */
714 if (copy_to_user(argp
, mtd
->ecclayout
,
715 sizeof(struct nand_ecclayout
)))
/* Presumably ECCGETSTATS: cumulative ECC statistics. */
722 if (copy_to_user(argp
, &mtd
->ecc_stats
,
723 sizeof(struct mtd_ecc_stats
)))
/* Presumably MTDFILEMODE: change this file's access mode; the raw
 * mode requires both read_oob and write_oob support. */
733 case MTD_MODE_OTP_FACTORY
:
734 case MTD_MODE_OTP_USER
:
735 ret
= otp_select_filemode(mfi
, arg
);
739 if (!mtd
->read_oob
|| !mtd
->write_oob
)
743 case MTD_MODE_NORMAL
:
759 static const struct file_operations mtd_fops
= {
760 .owner
= THIS_MODULE
,
766 .release
= mtd_close
,
769 static int __init
init_mtdchar(void)
771 if (register_chrdev(MTD_CHAR_MAJOR
, "mtd", &mtd_fops
)) {
772 printk(KERN_NOTICE
"Can't allocate major number %d for Memory Technology Devices.\n",
777 mtd_class
= class_create(THIS_MODULE
, "mtd");
779 if (IS_ERR(mtd_class
)) {
780 printk(KERN_ERR
"Error creating mtd class.\n");
781 unregister_chrdev(MTD_CHAR_MAJOR
, "mtd");
782 return PTR_ERR(mtd_class
);
785 register_mtd_user(¬ifier
);
789 static void __exit
cleanup_mtdchar(void)
791 unregister_mtd_user(¬ifier
);
792 class_destroy(mtd_class
);
793 unregister_chrdev(MTD_CHAR_MAJOR
, "mtd");
796 module_init(init_mtdchar
);
797 module_exit(cleanup_mtdchar
);
800 MODULE_LICENSE("GPL");
801 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
802 MODULE_DESCRIPTION("Direct character-device access to MTD devices");