/*
 * Direct MTD block device access
 *
 * $Id: mtdblock.c,v 1.38 2000/11/27 08:50:22 dwmw2 Exp $
 *
 * 02-nov-2000	Nicolas Pitre		Added read-modify-write with cache
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/mtd/mtd.h>
#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
#define DEVICE_REQUEST mtdblock_request
#define DEVICE_NR(device) (device)
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#define DEVICE_NO_RANDOM
#include <linux/blk.h>
/* for old kernels... */
#ifndef QUEUE_EMPTY
#define QUEUE_EMPTY  (!CURRENT)
#endif

#if LINUX_VERSION_CODE < 0x20300
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].plug_tq.sync)
#else
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].request_queue.plugged)
#endif
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>

static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);
static struct mtd_notifier notifier = {
	mtd_notify_add,
	mtd_notify_remove,
	NULL
};
static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
#endif
static struct mtdblk_dev {
	struct mtd_info *mtd; /* Locked */
	int count;
	struct semaphore cache_sem;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];

static spinlock_t mtdblks_lock;

static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];
/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */
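/*
 * Worked example (hypothetical numbers): with a 64KiB erase sector, a 1KiB
 * write at pos 0x10400 gives sect_start = 0x10000 and offset = 0x400.  The
 * whole sector is read into cache_data, the 1KiB is copied in at that
 * offset, and the sector is only erased and rewritten once a different
 * sector is written to or the cache is flushed (release or BLKFLSBUF).
 */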
static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = MTD_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */

	ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
			"at 0x%lx, size 0x%x\n", mtd->name,
			mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means.  Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 */
	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtd->erasesize;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		mtd->name, pos, len);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if( size > len )
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = MTD_READ(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtd->erasesize;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached
		 * Read the requested amount of data from our internal cache if it
		 * contains what we want, otherwise we read the data directly
		 * from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size);
		} else {
			ret = MTD_READ (mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
static int mtdblock_open(struct inode *inode, struct file *file)
{
	struct mtdblk_dev *mtdblk;
	int dev;

	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");

	if (!inode)
		return -EINVAL;

	dev = MINOR(inode->i_rdev);
	if (dev >= MAX_MTD_DEVICES)
		return -EINVAL;

	MOD_INC_USE_COUNT;

	spin_lock(&mtdblks_lock);

	/* If it's already open, no need to piss about. */
	if (mtdblks[dev]) {
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		return 0;
	}

	/* OK, it's not open. Try to find it */

	/* First we have to drop the lock, because we have to
	   do things which might sleep.
	*/
	spin_unlock(&mtdblks_lock);

	mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
	if (!mtdblk) {
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(mtdblk, 0, sizeof(*mtdblk));
	mtdblk->count = 1;
	mtdblk->mtd = get_mtd_device(NULL, dev);

	if (!mtdblk->mtd) {
		kfree(mtdblk);
		MOD_DEC_USE_COUNT;
		return -ENODEV;
	}

	init_MUTEX (&mtdblk->cache_sem);
	mtdblk->cache_state = STATE_EMPTY;
	mtdblk->cache_size = mtdblk->mtd->erasesize;
	mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
	if (!mtdblk->cache_data) {
		put_mtd_device(mtdblk->mtd);
		kfree(mtdblk);
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}

	/* OK, we've created a new one. Add it to the list. */

	spin_lock(&mtdblks_lock);

	if (mtdblks[dev]) {
		/* Another CPU made one at the same time as us. */
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
		return 0;
	}

	mtdblks[dev] = mtdblk;
	mtd_sizes[dev] = mtdblk->mtd->size/1024;
	mtd_blksizes[dev] = mtdblk->mtd->erasesize;
	if (mtd_blksizes[dev] > PAGE_SIZE)
		mtd_blksizes[dev] = PAGE_SIZE;
	set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));

	spin_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
	struct super_block * sb = get_super(inode->i_rdev);
#endif
	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	fsync_dev(inode->i_rdev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
	if (sb) invalidate_inodes(sb);
#endif
	invalidate_buffers(inode->i_rdev);

	dev = MINOR(inode->i_rdev);
	mtdblk = mtdblks[dev];

	down(&mtdblk->cache_sem);
	write_cached_data(mtdblk);
	up(&mtdblk->cache_sem);

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	MOD_DEC_USE_COUNT;
	release_return(0);
}
/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller.  The io_request_lock
 * is held upon entry and exit.
 * The head of our request queue is considered active so there is no need
 * to dequeue requests before we are done.
 */
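/*
 * Control flow note (as implemented below): the registered request
 * function, mtdblock_request(), merely wakes the mtdblockd thread;
 * mtdblock_thread() then calls handle_mtdblock_request() with the
 * io_request_lock held, so the actual flash I/O happens in process
 * context where it is safe to sleep.
 */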
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
		spin_unlock_irq(&io_request_lock);
		mtdblk = mtdblks[MINOR(req->rq_dev)];
		res = 0;

		if (MINOR(req->rq_dev) >= MAX_MTD_DEVICES)
			panic(__FUNCTION__": minor out of bounds");

		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		// Handle the request
		switch (req->cmd)
		{
			int err;

			case READ:
			down(&mtdblk->cache_sem);
			err = do_cached_read (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

			case WRITE:
			// Read only device
			if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
				break;

			// Do the write
			down(&mtdblk->cache_sem);
			err = do_cached_write (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}

end_req:
		spin_lock_irq(&io_request_lock);
		end_request(res);
	}
}
static volatile int leaving = 0;

#if LINUX_VERSION_CODE > 0x020300
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
#else
static struct semaphore thread_sem = MUTEX_LOCKED;
DECLARE_WAIT_QUEUE_HEAD(thr_wq);
#endif
int mtdblock_thread(void *dummy)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->session = 1;
	tsk->pgrp = 1;
	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	tsk->flags |= PF_MEMALLOC;
	strcpy(tsk->comm, "mtdblockd");
	tsk->tty = NULL;
	spin_lock_irq(&tsk->sigmask_lock);
	sigfillset(&tsk->blocked);
	recalc_sigpending(tsk);
	spin_unlock_irq(&tsk->sigmask_lock);
	exit_mm(tsk);
	exit_files(tsk);
	exit_sighand(tsk);
	exit_fs(tsk);

	while (!leaving) {
		add_wait_queue(&thr_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&io_request_lock);
		if (QUEUE_EMPTY || QUEUE_PLUGGED) {
			spin_unlock_irq(&io_request_lock);
			schedule();
			remove_wait_queue(&thr_wq, &wait);
		} else {
			remove_wait_queue(&thr_wq, &wait);
			set_current_state(TASK_RUNNING);
			handle_mtdblock_request();
			spin_unlock_irq(&io_request_lock);
		}
	}

	up(&thread_sem);
	return 0;
}
#if LINUX_VERSION_CODE < 0x20300
#define RQFUNC_ARG void
#else
#define RQFUNC_ARG request_queue_t *q
#endif

static void mtdblock_request(RQFUNC_ARG)
{
	/* Don't do anything, except wake the thread if necessary */
	wake_up(&thr_wq);
}
static int mtdblock_ioctl(struct inode * inode, struct file * file,
		      unsigned int cmd, unsigned long arg)
{
	struct mtdblk_dev *mtdblk;

	mtdblk = mtdblks[MINOR(inode->i_rdev)];

#ifdef PARANOIA
	if (!mtdblk)
		BUG();
#endif

	switch (cmd) {
	case BLKGETSIZE:   /* Return device size */
		if (!arg)
			return -EFAULT;
		return put_user((mtdblk->mtd->size >> 9),
				(long *) arg)?-EFAULT:0;

	case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		if(!capable(CAP_SYS_ADMIN))
			return -EACCES;
#endif
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		down(&mtdblk->cache_sem);
		write_cached_data(mtdblk);
		up(&mtdblk->cache_sem);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		return 0;

	default:
		return -EINVAL;
	}
}
#if LINUX_VERSION_CODE < 0x20326
static struct file_operations mtd_fops =
{
	open: mtdblock_open,
	ioctl: mtdblock_ioctl,
	release: mtdblock_release,
	read: block_read,
	write: block_write
};
#else
static struct block_device_operations mtd_fops =
{
	open: mtdblock_open,
	release: mtdblock_release,
	ioctl: mtdblock_ioctl
};
#endif
#ifdef CONFIG_DEVFS_FS
/* Notification that a new device has been added. Create the devfs entry for
 * it. */

static void mtd_notify_add(struct mtd_info* mtd)
{
	char name[8];

	if (!mtd)
		return;

	sprintf(name, "%d", mtd->index);
	devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
			DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
			S_IFBLK | S_IRUGO | S_IWUGO,
			&mtd_fops, NULL);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	devfs_unregister(devfs_rw_handle[mtd->index]);
}
#endif
#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define init_mtdblock init_module
#define cleanup_mtdblock cleanup_module
#endif
int __init init_mtdblock(void)
{
	int i;

	spin_lock_init(&mtdblks_lock);
#ifdef CONFIG_DEVFS_FS
	if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
	{
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
			MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
	register_mtd_user(&notifier);
#else
	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}
#endif

	/* We fill it in at open() time. */
	for (i=0; i< MAX_MTD_DEVICES; i++) {
		mtd_sizes[i] = 0;
		mtd_blksizes[i] = BLOCK_SIZE;
	}
	init_waitqueue_head(&thr_wq);
	/* Allow the block size to default to BLOCK_SIZE. */
	blksize_size[MAJOR_NR] = mtd_blksizes;
	blk_size[MAJOR_NR] = mtd_sizes;

#if LINUX_VERSION_CODE < 0x20320
	blk_dev[MAJOR_NR].request_fn = mtdblock_request;
#else
	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
#endif
	kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	return 0;
}
static void __exit cleanup_mtdblock(void)
{
	leaving = 1;
	wake_up(&thr_wq);
	down(&thread_sem);
#ifdef CONFIG_DEVFS_FS
	unregister_mtd_user(&notifier);
	devfs_unregister(devfs_dir_handle);
	devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
	unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
#if LINUX_VERSION_CODE < 0x20320
	blk_dev[MAJOR_NR].request_fn = NULL;
#else
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
#endif
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);