JFFS for WNR3500Lv2
[tomato.git] release/src-rt/linux/linux-2.6/drivers/mtd/mtdchar.c
/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;
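
/*
 * Each registered MTD device gets two character-device nodes from the
 * notifier below: "mtd%d" on the even minor (openable read-write) and
 * "mtd%dro" on the odd minor (read-only). mtd_open() relies on this
 * scheme: devnum == minor >> 1 and bit 0 of the minor marks the
 * read-only node.
 */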

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
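
/*
 * Note: mtd_kmalloc_up_to() is provided elsewhere in the MTD core; as
 * used here it is expected to return a kernel buffer of at most *size
 * bytes, shrinking the request (and updating *size) when a large
 * contiguous allocation cannot be satisfied. The read and write loops
 * below therefore transfer in chunks of min(count, size) instead of
 * assuming one buffer covers the whole request.
 */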
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	size_t size = count;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
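
/*
 * Illustrative userspace sketch (not part of this driver): querying the
 * device geometry with MEMGETINFO and erasing one block with MEMERASE,
 * which this file implements below. Struct and ioctl names come from
 * <mtd/mtd-user.h>; error handling is abbreviated.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int fd = open("/dev/mtd0", O_RDWR);
 *	struct mtd_info_user info;
 *	ioctl(fd, MEMGETINFO, &info);
 *
 *	struct erase_info_user ei = {
 *		.start  = 0,
 *		.length = info.erasesize,	// whole erase blocks only
 *	};
 *	ioctl(fd, MEMERASE, &ei);
 */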

static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype	= -1;
		info.eccsize	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;
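
	/*
	 * MEMERASE is synchronous from userspace's point of view: the
	 * erase is handed to the driver and this thread sleeps
	 * (uninterruptibly) on a stack wait queue until
	 * mtdchar_erase_callback() wakes it. The copy_from_user() below
	 * fills erase->addr and erase->len directly and so relies on
	 * those fields matching the layout of struct erase_info_user.
	 */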
	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}
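
	/*
	 * For the OOB ioctls below, buf.start carries both the page
	 * address and the offset into that page's OOB area: the low bits
	 * (buf.start modulo mtd->oobsize) become ops.ooboffs and the
	 * remainder selects the page passed to the driver. Transfers are
	 * limited to 4096 bytes per call and go through a temporary
	 * kernel bounce buffer.
	 */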
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
				 sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}
	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}
	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}
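
	/*
	 * MEMGETBADBLOCK and MEMSETBADBLOCK return the driver's result
	 * directly (for block_isbad that is > 0 when the block is bad,
	 * 0 when it is good, negative on error); only the "operation not
	 * supported" case goes through the common ret path at the end of
	 * this switch.
	 */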
	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}
#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif
	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}
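
	/*
	 * MTDFILEMODE selects the per-open-file mode (normal, raw, or one
	 * of the OTP modes) that mtd_read()/mtd_write() consult above.
	 * The missing break after MTD_MODE_RAW is intentional: once raw
	 * mode is accepted it falls through to MTD_MODE_NORMAL, which
	 * simply breaks out.
	 */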
	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}
	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");