mtd: mtdchar: add missing initializer on raw write
drivers/mtd/mtdchar.c
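In MTD_MODE_RAW writes, mtd_write() builds a struct mtd_oob_ops on the stack and hands it to mtd->write_oob(). The one input field the raw-write path never set is ops.ooblen, so the driver received whatever stack garbage happened to be there. A minimal sketch of the fixed initialization (assuming ooblen is the field the commit title refers to, since it is the only input member of struct mtd_oob_ops left unset in this path):

    struct mtd_oob_ops ops;

    ops.mode = MTD_OOB_RAW;   /* raw access: bypass ECC */
    ops.datbuf = kbuf;        /* data comes from the kernel bounce buffer */
    ops.oobbuf = NULL;        /* no separate OOB buffer in this path */
    ops.ooblen = 0;           /* previously left uninitialized on the stack */
    ops.ooboffs = 0;
    ops.len = len;

    ret = mtd->write_oob(mtd, *ppos, &ops);

The same initialization appears in the MTD_MODE_RAW case of mtd_write() below.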
/*
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
    struct mtd_info *mtd;
    enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;

    switch (orig) {
    case SEEK_SET:
        break;
    case SEEK_CUR:
        offset += file->f_pos;
        break;
    case SEEK_END:
        offset += mtd->size;
        break;
    default:
        return -EINVAL;
    }

    if (offset >= 0 && offset <= mtd->size)
        return file->f_pos = offset;

    return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
    int minor = iminor(inode);
    int devnum = minor >> 1;
    int ret = 0;
    struct mtd_info *mtd;
    struct mtd_file_info *mfi;

    DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

    if (devnum >= MAX_MTD_DEVICES)
        return -ENODEV;

    /* You can't open the RO devices RW */
    if ((file->f_mode & FMODE_WRITE) && (minor & 1))
        return -EACCES;

    lock_kernel();
    mtd = get_mtd_device(NULL, devnum);

    if (IS_ERR(mtd)) {
        ret = PTR_ERR(mtd);
        goto out;
    }

    if (mtd->type == MTD_ABSENT) {
        put_mtd_device(mtd);
        ret = -ENODEV;
        goto out;
    }

    if (mtd->backing_dev_info)
        file->f_mapping->backing_dev_info = mtd->backing_dev_info;

    /* You can't open it RW if it's not a writeable device */
    if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
        put_mtd_device(mtd);
        ret = -EACCES;
        goto out;
    }

    mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
    if (!mfi) {
        put_mtd_device(mtd);
        ret = -ENOMEM;
        goto out;
    }
    mfi->mtd = mtd;
    file->private_data = mfi;

out:
    unlock_kernel();
    return ret;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;

    DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

    /* Only sync if opened RW */
    if ((file->f_mode & FMODE_WRITE) && mtd->sync)
        mtd->sync(mtd);

    put_mtd_device(mtd);
    file->private_data = NULL;
    kfree(mfi);

    return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;
    size_t retlen = 0;
    size_t total_retlen = 0;
    int ret = 0;
    int len;
    char *kbuf;

    DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

    if (*ppos + count > mtd->size)
        count = mtd->size - *ppos;

    if (!count)
        return 0;

    /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
       and pass them directly to the MTD functions */

    if (count > MAX_KMALLOC_SIZE)
        kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
    else
        kbuf = kmalloc(count, GFP_KERNEL);

    if (!kbuf)
        return -ENOMEM;

    while (count) {

        if (count > MAX_KMALLOC_SIZE)
            len = MAX_KMALLOC_SIZE;
        else
            len = count;

        switch (mfi->mode) {
        case MTD_MODE_OTP_FACTORY:
            ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
            break;
        case MTD_MODE_OTP_USER:
            ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
            break;
        case MTD_MODE_RAW:
        {
            struct mtd_oob_ops ops;

            ops.mode = MTD_OOB_RAW;
            ops.datbuf = kbuf;
            ops.oobbuf = NULL;
            ops.len = len;

            ret = mtd->read_oob(mtd, *ppos, &ops);
            retlen = ops.retlen;
            break;
        }
        default:
            ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
        }
        /* NAND returns -EBADMSG on ECC errors, but it still returns
         * the data. For our userspace tools it is important
         * to dump areas with ECC errors!
         * For kernel internal usage it also might return -EUCLEAN
         * to signal the caller that a bitflip has occurred and has
         * been corrected by the ECC algorithm.
         * Userspace software which accesses NAND this way
         * must be aware of the fact that it deals with NAND.
         */
        if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
            *ppos += retlen;
            if (copy_to_user(buf, kbuf, retlen)) {
                kfree(kbuf);
                return -EFAULT;
            }
            else
                total_retlen += retlen;

            count -= retlen;
            buf += retlen;
            if (retlen == 0)
                count = 0;
        }
        else {
            kfree(kbuf);
            return ret;
        }
    }

    kfree(kbuf);
    return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;
    char *kbuf;
    size_t retlen;
    size_t total_retlen = 0;
    int ret = 0;
    int len;

    DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

    if (*ppos == mtd->size)
        return -ENOSPC;

    if (*ppos + count > mtd->size)
        count = mtd->size - *ppos;

    if (!count)
        return 0;

    if (count > MAX_KMALLOC_SIZE)
        kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
    else
        kbuf = kmalloc(count, GFP_KERNEL);

    if (!kbuf)
        return -ENOMEM;

    while (count) {

        if (count > MAX_KMALLOC_SIZE)
            len = MAX_KMALLOC_SIZE;
        else
            len = count;

        if (copy_from_user(kbuf, buf, len)) {
            kfree(kbuf);
            return -EFAULT;
        }

        switch (mfi->mode) {
        case MTD_MODE_OTP_FACTORY:
            ret = -EROFS;
            break;
        case MTD_MODE_OTP_USER:
            if (!mtd->write_user_prot_reg) {
                ret = -EOPNOTSUPP;
                break;
            }
            ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
            break;

        case MTD_MODE_RAW:
        {
            struct mtd_oob_ops ops;

            ops.mode = MTD_OOB_RAW;
            ops.datbuf = kbuf;
            ops.oobbuf = NULL;
            ops.ooblen = 0;   /* no OOB data here; don't leave ooblen uninitialized */
            ops.ooboffs = 0;
            ops.len = len;

            ret = mtd->write_oob(mtd, *ppos, &ops);
            retlen = ops.retlen;
            break;
        }

        default:
            ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
        }
        if (!ret) {
            *ppos += retlen;
            total_retlen += retlen;
            count -= retlen;
            buf += retlen;
        }
        else {
            kfree(kbuf);
            return ret;
        }
    }

    kfree(kbuf);
    return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
    wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
    struct mtd_info *mtd = mfi->mtd;
    int ret = 0;

    switch (mode) {
    case MTD_OTP_FACTORY:
        if (!mtd->read_fact_prot_reg)
            ret = -EOPNOTSUPP;
        else
            mfi->mode = MTD_MODE_OTP_FACTORY;
        break;
    case MTD_OTP_USER:
        if (!mtd->read_fact_prot_reg)
            ret = -EOPNOTSUPP;
        else
            mfi->mode = MTD_MODE_OTP_USER;
        break;
    default:
        ret = -EINVAL;
        /* falls through to MTD_OTP_OFF */
    case MTD_OTP_OFF:
        break;
    }
    return ret;
}
#else
# define otp_select_filemode(f, m)	-EOPNOTSUPP
#endif

static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
    uint64_t start, uint32_t length, void __user *ptr,
    uint32_t __user *retp)
{
    struct mtd_oob_ops ops;
    uint32_t retlen;
    int ret = 0;

    if (!(file->f_mode & FMODE_WRITE))
        return -EPERM;

    if (length > 4096)
        return -EINVAL;

    if (!mtd->write_oob)
        ret = -EOPNOTSUPP;
    else
        ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

    if (ret)
        return ret;

    ops.ooblen = length;
    ops.ooboffs = start & (mtd->oobsize - 1);
    ops.datbuf = NULL;
    ops.mode = MTD_OOB_PLACE;

    if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
        return -EINVAL;

    ops.oobbuf = kmalloc(length, GFP_KERNEL);
    if (!ops.oobbuf)
        return -ENOMEM;

    if (copy_from_user(ops.oobbuf, ptr, length)) {
        kfree(ops.oobbuf);
        return -EFAULT;
    }

    start &= ~((uint64_t)mtd->oobsize - 1);
    ret = mtd->write_oob(mtd, start, &ops);

    if (ops.oobretlen > 0xFFFFFFFFU)
        ret = -EOVERFLOW;
    retlen = ops.oobretlen;
    if (copy_to_user(retp, &retlen, sizeof(length)))
        ret = -EFAULT;

    kfree(ops.oobbuf);
    return ret;
}

static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
    uint32_t length, void __user *ptr, uint32_t __user *retp)
{
    struct mtd_oob_ops ops;
    int ret = 0;

    if (length > 4096)
        return -EINVAL;

    if (!mtd->read_oob)
        ret = -EOPNOTSUPP;
    else
        ret = access_ok(VERIFY_WRITE, ptr,
                        length) ? 0 : -EFAULT;
    if (ret)
        return ret;

    ops.ooblen = length;
    ops.ooboffs = start & (mtd->oobsize - 1);
    ops.datbuf = NULL;
    ops.mode = MTD_OOB_PLACE;

    if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
        return -EINVAL;

    ops.oobbuf = kmalloc(length, GFP_KERNEL);
    if (!ops.oobbuf)
        return -ENOMEM;

    start &= ~((uint64_t)mtd->oobsize - 1);
    ret = mtd->read_oob(mtd, start, &ops);

    if (put_user(ops.oobretlen, retp))
        ret = -EFAULT;
    else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
                                           ops.oobretlen))
        ret = -EFAULT;

    kfree(ops.oobbuf);
    return ret;
}

static int mtd_ioctl(struct inode *inode, struct file *file,
                     u_int cmd, u_long arg)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;
    void __user *argp = (void __user *)arg;
    int ret = 0;
    u_long size;
    struct mtd_info_user info;

    DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

    size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
    if (cmd & IOC_IN) {
        if (!access_ok(VERIFY_READ, argp, size))
            return -EFAULT;
    }
    if (cmd & IOC_OUT) {
        if (!access_ok(VERIFY_WRITE, argp, size))
            return -EFAULT;
    }

    switch (cmd) {
    case MEMGETREGIONCOUNT:
        if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
            return -EFAULT;
        break;

    case MEMGETREGIONINFO:
    {
        uint32_t ur_idx;
        struct mtd_erase_region_info *kr;
        struct region_info_user *ur = (struct region_info_user *) argp;

        if (get_user(ur_idx, &(ur->regionindex)))
            return -EFAULT;

        kr = &(mtd->eraseregions[ur_idx]);

        if (put_user(kr->offset, &(ur->offset))
            || put_user(kr->erasesize, &(ur->erasesize))
            || put_user(kr->numblocks, &(ur->numblocks)))
            return -EFAULT;

        break;
    }

    case MEMGETINFO:
        info.type = mtd->type;
        info.flags = mtd->flags;
        info.size = mtd->size;
        info.erasesize = mtd->erasesize;
        info.writesize = mtd->writesize;
        info.oobsize = mtd->oobsize;
        /* The below fields are obsolete */
        info.ecctype = -1;
        info.eccsize = 0;
        if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
            return -EFAULT;
        break;

    case MEMERASE:
    case MEMERASE64:
    {
        struct erase_info *erase;

        if (!(file->f_mode & FMODE_WRITE))
            return -EPERM;

        erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
        if (!erase)
            ret = -ENOMEM;
        else {
            wait_queue_head_t waitq;
            DECLARE_WAITQUEUE(wait, current);

            init_waitqueue_head(&waitq);

            if (cmd == MEMERASE64) {
                struct erase_info_user64 einfo64;

                if (copy_from_user(&einfo64, argp,
                                   sizeof(struct erase_info_user64))) {
                    kfree(erase);
                    return -EFAULT;
                }
                erase->addr = einfo64.start;
                erase->len = einfo64.length;
            } else {
                struct erase_info_user einfo32;

                if (copy_from_user(&einfo32, argp,
                                   sizeof(struct erase_info_user))) {
                    kfree(erase);
                    return -EFAULT;
                }
                erase->addr = einfo32.start;
                erase->len = einfo32.length;
            }
            erase->mtd = mtd;
            erase->callback = mtdchar_erase_callback;
            erase->priv = (unsigned long)&waitq;

            /*
              FIXME: Allow INTERRUPTIBLE. Which means
              not having the wait_queue head on the stack.

              If the wq_head is on the stack, and we
              leave because we got interrupted, then the
              wq_head is no longer there when the
              callback routine tries to wake us up.
            */
            ret = mtd->erase(mtd, erase);
            if (!ret) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
                if (erase->state != MTD_ERASE_DONE &&
                    erase->state != MTD_ERASE_FAILED)
                    schedule();
                remove_wait_queue(&waitq, &wait);
                set_current_state(TASK_RUNNING);

                ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
            }
            kfree(erase);
        }
        break;
    }

    case MEMWRITEOOB:
    {
        struct mtd_oob_buf buf;
        struct mtd_oob_buf __user *buf_user = argp;

        /* NOTE: writes return length to buf_user->length */
        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
                                  buf.ptr, &buf_user->length);
        break;
    }

    case MEMREADOOB:
    {
        struct mtd_oob_buf buf;
        struct mtd_oob_buf __user *buf_user = argp;

        /* NOTE: writes return length to buf_user->start */
        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_readoob(mtd, buf.start, buf.length,
                                 buf.ptr, &buf_user->start);
        break;
    }

    case MEMWRITEOOB64:
    {
        struct mtd_oob_buf64 buf;
        struct mtd_oob_buf64 __user *buf_user = argp;

        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
                                  (void __user *)(uintptr_t)buf.usr_ptr,
                                  &buf_user->length);
        break;
    }

    case MEMREADOOB64:
    {
        struct mtd_oob_buf64 buf;
        struct mtd_oob_buf64 __user *buf_user = argp;

        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_readoob(mtd, buf.start, buf.length,
                                 (void __user *)(uintptr_t)buf.usr_ptr,
                                 &buf_user->length);
        break;
    }

    case MEMLOCK:
    {
        struct erase_info_user einfo;

        if (copy_from_user(&einfo, argp, sizeof(einfo)))
            return -EFAULT;

        if (!mtd->lock)
            ret = -EOPNOTSUPP;
        else
            ret = mtd->lock(mtd, einfo.start, einfo.length);
        break;
    }

    case MEMUNLOCK:
    {
        struct erase_info_user einfo;

        if (copy_from_user(&einfo, argp, sizeof(einfo)))
            return -EFAULT;

        if (!mtd->unlock)
            ret = -EOPNOTSUPP;
        else
            ret = mtd->unlock(mtd, einfo.start, einfo.length);
        break;
    }

    /* Legacy interface */
    case MEMGETOOBSEL:
    {
        struct nand_oobinfo oi;

        if (!mtd->ecclayout)
            return -EOPNOTSUPP;
        if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
            return -EINVAL;

        oi.useecc = MTD_NANDECC_AUTOPLACE;
        memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
        memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
               sizeof(oi.oobfree));
        oi.eccbytes = mtd->ecclayout->eccbytes;

        if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
            return -EFAULT;
        break;
    }

    case MEMGETBADBLOCK:
    {
        loff_t offs;

        if (copy_from_user(&offs, argp, sizeof(loff_t)))
            return -EFAULT;
        if (!mtd->block_isbad)
            ret = -EOPNOTSUPP;
        else
            return mtd->block_isbad(mtd, offs);
        break;
    }

    case MEMSETBADBLOCK:
    {
        loff_t offs;

        if (copy_from_user(&offs, argp, sizeof(loff_t)))
            return -EFAULT;
        if (!mtd->block_markbad)
            ret = -EOPNOTSUPP;
        else
            return mtd->block_markbad(mtd, offs);
        break;
    }

#ifdef CONFIG_HAVE_MTD_OTP
    case OTPSELECT:
    {
        int mode;
        if (copy_from_user(&mode, argp, sizeof(int)))
            return -EFAULT;

        mfi->mode = MTD_MODE_NORMAL;

        ret = otp_select_filemode(mfi, mode);

        file->f_pos = 0;
        break;
    }

    case OTPGETREGIONCOUNT:
    case OTPGETREGIONINFO:
    {
        struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
        ret = -EOPNOTSUPP;
        switch (mfi->mode) {
        case MTD_MODE_OTP_FACTORY:
            if (mtd->get_fact_prot_info)
                ret = mtd->get_fact_prot_info(mtd, buf, 4096);
            break;
        case MTD_MODE_OTP_USER:
            if (mtd->get_user_prot_info)
                ret = mtd->get_user_prot_info(mtd, buf, 4096);
            break;
        default:
            break;
        }
        if (ret >= 0) {
            if (cmd == OTPGETREGIONCOUNT) {
                int nbr = ret / sizeof(struct otp_info);
                ret = copy_to_user(argp, &nbr, sizeof(int));
            } else
                ret = copy_to_user(argp, buf, ret);
            if (ret)
                ret = -EFAULT;
        }
        kfree(buf);
        break;
    }

    case OTPLOCK:
    {
        struct otp_info oinfo;

        if (mfi->mode != MTD_MODE_OTP_USER)
            return -EINVAL;
        if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
            return -EFAULT;
        if (!mtd->lock_user_prot_reg)
            return -EOPNOTSUPP;
        ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
        break;
    }
#endif

    case ECCGETLAYOUT:
    {
        if (!mtd->ecclayout)
            return -EOPNOTSUPP;

        if (copy_to_user(argp, mtd->ecclayout,
                         sizeof(struct nand_ecclayout)))
            return -EFAULT;
        break;
    }

    case ECCGETSTATS:
    {
        if (copy_to_user(argp, &mtd->ecc_stats,
                         sizeof(struct mtd_ecc_stats)))
            return -EFAULT;
        break;
    }

    case MTDFILEMODE:
    {
        mfi->mode = 0;

        switch (arg) {
        case MTD_MODE_OTP_FACTORY:
        case MTD_MODE_OTP_USER:
            ret = otp_select_filemode(mfi, arg);
            break;

        case MTD_MODE_RAW:
            if (!mtd->read_oob || !mtd->write_oob)
                return -EOPNOTSUPP;
            mfi->mode = arg;
            /* fall through */

        case MTD_MODE_NORMAL:
            break;
        default:
            ret = -EINVAL;
        }
        file->f_pos = 0;
        break;
    }

    default:
        ret = -ENOTTY;
    }

    return ret;
} /* memory_ioctl */

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
    u_int32_t start;
    u_int32_t length;
    compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
    unsigned long arg)
{
    struct inode *inode = file->f_path.dentry->d_inode;
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;
    void __user *argp = compat_ptr(arg);
    int ret = 0;

    lock_kernel();

    switch (cmd) {
    case MEMWRITEOOB32:
    {
        struct mtd_oob_buf32 buf;
        struct mtd_oob_buf32 __user *buf_user = argp;

        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_writeoob(file, mtd, buf.start,
                                  buf.length, compat_ptr(buf.ptr),
                                  &buf_user->length);
        break;
    }

    case MEMREADOOB32:
    {
        struct mtd_oob_buf32 buf;
        struct mtd_oob_buf32 __user *buf_user = argp;

        /* NOTE: writes return length to buf->start */
        if (copy_from_user(&buf, argp, sizeof(buf)))
            ret = -EFAULT;
        else
            ret = mtd_do_readoob(mtd, buf.start,
                                 buf.length, compat_ptr(buf.ptr),
                                 &buf_user->start);
        break;
    }

    default:
        ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
    }

    unlock_kernel();

    return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (the MMU case can't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;

    if (mtd->get_unmapped_area) {
        unsigned long offset;

        if (addr != 0)
            return (unsigned long) -EINVAL;

        if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
            return (unsigned long) -EINVAL;

        offset = pgoff << PAGE_SHIFT;
        if (offset > mtd->size - len)
            return (unsigned long) -EINVAL;

        return mtd->get_unmapped_area(mtd, len, offset, flags);
    }

    /* can't map directly */
    return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;

    if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
        return 0;
    return -ENOSYS;
#else
    return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}

static const struct file_operations mtd_fops = {
    .owner		= THIS_MODULE,
    .llseek		= mtd_lseek,
    .read		= mtd_read,
    .write		= mtd_write,
    .ioctl		= mtd_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl	= mtd_compat_ioctl,
#endif
    .open		= mtd_open,
    .release	= mtd_close,
    .mmap		= mtd_mmap,
#ifndef CONFIG_MMU
    .get_unmapped_area = mtd_get_unmapped_area,
#endif
};

static int __init init_mtdchar(void)
{
    int status;

    status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
    if (status < 0) {
        printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
               MTD_CHAR_MAJOR);
    }

    return status;
}

static void __exit cleanup_mtdchar(void)
{
    unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
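
For reference, a minimal userspace sketch of how this character device is typically driven. The device node /dev/mtd0 is hypothetical; MEMGETINFO, MTDFILEMODE and MTD_MODE_RAW come from <mtd/mtd-user.h> and correspond to the ioctl cases handled above. This is an illustration, not part of the driver:

    /* Illustrative only: query an MTD char device and switch it to raw mode. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        struct mtd_info_user info;
        int fd = open("/dev/mtd0", O_RDWR);   /* hypothetical device node */

        if (fd < 0)
            return 1;

        /* MEMGETINFO fills struct mtd_info_user, handled in mtd_ioctl() above. */
        if (ioctl(fd, MEMGETINFO, &info) == 0)
            printf("size %u erasesize %u writesize %u oobsize %u\n",
                   info.size, info.erasesize, info.writesize, info.oobsize);

        /* MTDFILEMODE takes the mode as the ioctl argument itself; after this,
         * read()/write() on fd go through the MTD_MODE_RAW paths above. */
        ioctl(fd, MTDFILEMODE, MTD_MODE_RAW);

        close(fd);
        return 0;
    }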