Hopefully get the Kconfig PCI stuff right, finally.
[linux-2.6/linux-mips.git] / drivers / mtd / mtdblock.c
blob815dbfe4ad830fb04e43bd35712dff7985eaafac
/*
 * Direct MTD block device access
 *
 * $Id: mtdblock.c,v 1.47 2001/10/02 15:05:11 dwmw2 Exp $
 *
 * 02-nov-2000	Nicolas Pitre		Added read-modify-write with cache
 */
9 #include <linux/config.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mtd/mtd.h>
16 #include <linux/mtd/compatmac.h>
17 #include <linux/buffer_head.h>
19 #define MAJOR_NR MTD_BLOCK_MAJOR
20 #define DEVICE_NAME "mtdblock"
21 #define DEVICE_NR(device) (device)
22 #include <linux/blk.h>
23 #include <linux/devfs_fs_kernel.h>
25 static void mtd_notify_add(struct mtd_info* mtd);
26 static void mtd_notify_remove(struct mtd_info* mtd);
27 static struct mtd_notifier notifier = {
28 mtd_notify_add,
29 mtd_notify_remove,
30 NULL
33 static struct mtdblk_dev {
34 struct mtd_info *mtd; /* Locked */
35 int count;
36 struct semaphore cache_sem;
37 unsigned char *cache_data;
38 unsigned long cache_offset;
39 unsigned int cache_size;
40 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
41 } *mtdblks[MAX_MTD_DEVICES];
43 static struct gendisk *mtddisk[MAX_MTD_DEVICES];
45 static spinlock_t mtdblks_lock;
/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write requests. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */
57 static void erase_callback(struct erase_info *done)
59 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
60 wake_up(wait_q);
63 static int erase_write (struct mtd_info *mtd, unsigned long pos,
64 int len, const char *buf)
66 struct erase_info erase;
67 DECLARE_WAITQUEUE(wait, current);
68 wait_queue_head_t wait_q;
69 size_t retlen;
70 int ret;
73 * First, let's erase the flash block.
76 init_waitqueue_head(&wait_q);
77 erase.mtd = mtd;
78 erase.callback = erase_callback;
79 erase.addr = pos;
80 erase.len = len;
81 erase.priv = (u_long)&wait_q;
83 set_current_state(TASK_INTERRUPTIBLE);
84 add_wait_queue(&wait_q, &wait);
86 ret = MTD_ERASE(mtd, &erase);
87 if (ret) {
88 set_current_state(TASK_RUNNING);
89 remove_wait_queue(&wait_q, &wait);
90 printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
91 "on \"%s\" failed\n",
92 pos, len, mtd->name);
93 return ret;
96 schedule(); /* Wait for erase to finish. */
97 remove_wait_queue(&wait_q, &wait);
100 * Next, writhe data to flash.
103 ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
104 if (ret)
105 return ret;
106 if (retlen != len)
107 return -EIO;
108 return 0;
112 static int write_cached_data (struct mtdblk_dev *mtdblk)
114 struct mtd_info *mtd = mtdblk->mtd;
115 int ret;
117 if (mtdblk->cache_state != STATE_DIRTY)
118 return 0;
120 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
121 "at 0x%lx, size 0x%x\n", mtd->name,
122 mtdblk->cache_offset, mtdblk->cache_size);
124 ret = erase_write (mtd, mtdblk->cache_offset,
125 mtdblk->cache_size, mtdblk->cache_data);
126 if (ret)
127 return ret;
130 * Here we could argably set the cache state to STATE_CLEAN.
131 * However this could lead to inconsistency since we will not
132 * be notified if this content is altered on the flash by other
133 * means. Let's declare it empty and leave buffering tasks to
134 * the buffer cache instead.
136 mtdblk->cache_state = STATE_EMPTY;
137 return 0;
141 static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
142 int len, const char *buf)
144 struct mtd_info *mtd = mtdblk->mtd;
145 unsigned int sect_size = mtdblk->cache_size;
146 size_t retlen;
147 int ret;
149 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
150 mtd->name, pos, len);
152 if (!sect_size)
153 return MTD_WRITE (mtd, pos, len, &retlen, buf);
155 while (len > 0) {
156 unsigned long sect_start = (pos/sect_size)*sect_size;
157 unsigned int offset = pos - sect_start;
158 unsigned int size = sect_size - offset;
159 if( size > len )
160 size = len;
162 if (size == sect_size) {
164 * We are covering a whole sector. Thus there is no
165 * need to bother with the cache while it may still be
166 * useful for other partial writes.
168 ret = erase_write (mtd, pos, size, buf);
169 if (ret)
170 return ret;
171 } else {
172 /* Partial sector: need to use the cache */
174 if (mtdblk->cache_state == STATE_DIRTY &&
175 mtdblk->cache_offset != sect_start) {
176 ret = write_cached_data(mtdblk);
177 if (ret)
178 return ret;
181 if (mtdblk->cache_state == STATE_EMPTY ||
182 mtdblk->cache_offset != sect_start) {
183 /* fill the cache with the current sector */
184 mtdblk->cache_state = STATE_EMPTY;
185 ret = MTD_READ(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data);
186 if (ret)
187 return ret;
188 if (retlen != sect_size)
189 return -EIO;
191 mtdblk->cache_offset = sect_start;
192 mtdblk->cache_size = sect_size;
193 mtdblk->cache_state = STATE_CLEAN;
196 /* write data to our local cache */
197 memcpy (mtdblk->cache_data + offset, buf, size);
198 mtdblk->cache_state = STATE_DIRTY;
201 buf += size;
202 pos += size;
203 len -= size;
206 return 0;
210 static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
211 int len, char *buf)
213 struct mtd_info *mtd = mtdblk->mtd;
214 unsigned int sect_size = mtdblk->cache_size;
215 size_t retlen;
216 int ret;
218 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
219 mtd->name, pos, len);
221 if (!sect_size)
222 return MTD_READ (mtd, pos, len, &retlen, buf);
224 while (len > 0) {
225 unsigned long sect_start = (pos/sect_size)*sect_size;
226 unsigned int offset = pos - sect_start;
227 unsigned int size = sect_size - offset;
228 if (size > len)
229 size = len;
232 * Check if the requested data is already cached
233 * Read the requested amount of data from our internal cache if it
234 * contains what we want, otherwise we read the data directly
235 * from flash.
237 if (mtdblk->cache_state != STATE_EMPTY &&
238 mtdblk->cache_offset == sect_start) {
239 memcpy (buf, mtdblk->cache_data + offset, size);
240 } else {
241 ret = MTD_READ (mtd, pos, size, &retlen, buf);
242 if (ret)
243 return ret;
244 if (retlen != size)
245 return -EIO;
248 buf += size;
249 pos += size;
250 len -= size;
253 return 0;
256 static struct block_device_operations mtd_fops;
258 static int mtdblock_open(struct inode *inode, struct file *file)
260 struct mtdblk_dev *mtdblk;
261 struct mtd_info *mtd;
262 int dev = minor(inode->i_rdev);
263 struct gendisk *disk;
265 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
267 if (dev >= MAX_MTD_DEVICES)
268 return -EINVAL;
270 mtd = get_mtd_device(NULL, dev);
271 if (!mtd)
272 return -ENODEV;
273 if (MTD_ABSENT == mtd->type) {
274 put_mtd_device(mtd);
275 return -ENODEV;
278 spin_lock(&mtdblks_lock);
280 /* If it's already open, no need to piss about. */
281 if (mtdblks[dev]) {
282 mtdblks[dev]->count++;
283 spin_unlock(&mtdblks_lock);
284 return 0;
287 /* OK, it's not open. Try to find it */
289 /* First we have to drop the lock, because we have to
290 to things which might sleep.
292 spin_unlock(&mtdblks_lock);
294 mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
295 disk = mtddisk[dev];
296 if (!mtdblk || !disk)
297 goto Enomem;
298 memset(mtdblk, 0, sizeof(*mtdblk));
299 mtdblk->count = 1;
300 mtdblk->mtd = mtd;
302 init_MUTEX (&mtdblk->cache_sem);
303 mtdblk->cache_state = STATE_EMPTY;
304 if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
305 mtdblk->mtd->erasesize) {
306 mtdblk->cache_size = mtdblk->mtd->erasesize;
307 mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
308 if (!mtdblk->cache_data)
309 goto Enomem;
312 /* OK, we've created a new one. Add it to the list. */
314 spin_lock(&mtdblks_lock);
316 if (mtdblks[dev]) {
317 /* Another CPU made one at the same time as us. */
318 mtdblks[dev]->count++;
319 spin_unlock(&mtdblks_lock);
320 put_mtd_device(mtdblk->mtd);
321 vfree(mtdblk->cache_data);
322 kfree(mtdblk);
323 return 0;
326 mtdblks[dev] = mtdblk;
327 set_device_ro(inode->i_bdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));
329 spin_unlock(&mtdblks_lock);
331 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
333 return 0;
334 Enomem:
335 put_mtd_device(mtd);
336 kfree(mtdblk);
337 return -ENOMEM;
340 static release_t mtdblock_release(struct inode *inode, struct file *file)
342 int dev;
343 struct mtdblk_dev *mtdblk;
344 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
346 if (inode == NULL)
347 release_return(-ENODEV);
349 dev = minor(inode->i_rdev);
350 mtdblk = mtdblks[dev];
352 down(&mtdblk->cache_sem);
353 write_cached_data(mtdblk);
354 up(&mtdblk->cache_sem);
356 spin_lock(&mtdblks_lock);
357 if (!--mtdblk->count) {
358 /* It was the last usage. Free the device */
359 mtdblks[dev] = NULL;
360 spin_unlock(&mtdblks_lock);
361 if (mtdblk->mtd->sync)
362 mtdblk->mtd->sync(mtdblk->mtd);
363 put_mtd_device(mtdblk->mtd);
364 vfree(mtdblk->cache_data);
365 kfree(mtdblk);
366 } else {
367 spin_unlock(&mtdblks_lock);
370 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
372 release_return(0);
377 * This is a special request_fn because it is executed in a process context
378 * to be able to sleep independently of the caller. The queue_lock
379 * is held upon entry and exit.
380 * The head of our request queue is considered active so there is no need
381 * to dequeue requests before we are done.
383 static struct request_queue mtd_queue;
384 static void handle_mtdblock_request(void)
386 struct request *req;
387 struct mtdblk_dev *mtdblk;
388 unsigned int res;
390 while ((req = elv_next_request(&mtd_queue)) != NULL) {
391 struct mtdblk_dev **p = req->rq_disk->private_data;
392 spin_unlock_irq(mtd_queue.queue_lock);
393 mtdblk = *p;
394 res = 0;
396 if (! (req->flags & REQ_CMD))
397 goto end_req;
399 if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
400 goto end_req;
402 // Handle the request
403 switch (rq_data_dir(req))
405 int err;
407 case READ:
408 down(&mtdblk->cache_sem);
409 err = do_cached_read (mtdblk, req->sector << 9,
410 req->current_nr_sectors << 9,
411 req->buffer);
412 up(&mtdblk->cache_sem);
413 if (!err)
414 res = 1;
415 break;
417 case WRITE:
418 // Read only device
419 if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
420 break;
422 // Do the write
423 down(&mtdblk->cache_sem);
424 err = do_cached_write (mtdblk, req->sector << 9,
425 req->current_nr_sectors << 9,
426 req->buffer);
427 up(&mtdblk->cache_sem);
428 if (!err)
429 res = 1;
430 break;
433 end_req:
434 spin_lock_irq(mtd_queue.queue_lock);
435 if (!end_that_request_first(req, res, req->hard_cur_sectors)) {
436 blkdev_dequeue_request(req);
437 end_that_request_last(req);
443 static volatile int leaving = 0;
444 static DECLARE_MUTEX_LOCKED(thread_sem);
445 static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
447 int mtdblock_thread(void *dummy)
449 struct task_struct *tsk = current;
450 DECLARE_WAITQUEUE(wait, tsk);
452 /* we might get involved when memory gets low, so use PF_MEMALLOC */
453 tsk->flags |= PF_MEMALLOC;
454 daemonize("mtdblockd");
456 while (!leaving) {
457 add_wait_queue(&thr_wq, &wait);
458 set_current_state(TASK_INTERRUPTIBLE);
459 spin_lock_irq(mtd_queue.queue_lock);
460 if (!elv_next_request(&mtd_queue) || blk_queue_plugged(&mtd_queue)) {
461 spin_unlock_irq(mtd_queue.queue_lock);
462 schedule();
463 remove_wait_queue(&thr_wq, &wait);
464 } else {
465 remove_wait_queue(&thr_wq, &wait);
466 set_current_state(TASK_RUNNING);
467 handle_mtdblock_request();
468 spin_unlock_irq(mtd_queue.queue_lock);
472 up(&thread_sem);
473 return 0;
476 static void mtdblock_request(struct request_queue *q)
478 /* Don't do anything, except wake the thread if necessary */
479 wake_up(&thr_wq);
483 static int mtdblock_ioctl(struct inode * inode, struct file * file,
484 unsigned int cmd, unsigned long arg)
486 struct mtdblk_dev *mtdblk;
488 mtdblk = mtdblks[minor(inode->i_rdev)];
490 #ifdef PARANOIA
491 if (!mtdblk)
492 BUG();
493 #endif
495 switch (cmd) {
496 case BLKFLSBUF:
497 fsync_bdev(inode->i_bdev);
498 invalidate_bdev(inode->i_bdev, 0);
499 down(&mtdblk->cache_sem);
500 write_cached_data(mtdblk);
501 up(&mtdblk->cache_sem);
502 if (mtdblk->mtd->sync)
503 mtdblk->mtd->sync(mtdblk->mtd);
504 return 0;
506 default:
507 return -EINVAL;
511 static struct block_device_operations mtd_fops =
513 .owner = THIS_MODULE,
514 .open = mtdblock_open,
515 .release = mtdblock_release,
516 .ioctl = mtdblock_ioctl
519 /* Notification that a new device has been added. Create the devfs entry for
520 * it. */
522 static void mtd_notify_add(struct mtd_info* mtd)
524 struct gendisk *disk;
525 char name[16];
527 if (!mtd || mtd->type == MTD_ABSENT)
528 return;
530 disk = alloc_disk(1);
531 if (disk) {
532 disk->major = MAJOR_NR;
533 disk->first_minor = mtd->index;
534 disk->fops = &mtd_fops;
536 sprintf(disk->disk_name, "mtdblock%d", mtd->index);
537 sprintf(disk->devfs_name, "mtdblock/%d", mtd->index);
539 mtddisk[mtd->index] = disk;
540 set_capacity(disk, mtd->size / 512);
541 disk->private_data = &mtdblks[mtd->index];
542 disk->queue = &mtd_queue;
544 add_disk(disk);
548 static void mtd_notify_remove(struct mtd_info* mtd)
550 if (!mtd || mtd->type == MTD_ABSENT)
551 return;
553 if (mtddisk[mtd->index]) {
554 del_gendisk(mtddisk[mtd->index]);
555 put_disk(mtddisk[mtd->index]);
556 mtddisk[mtd->index] = NULL;
560 static spinlock_t mtddev_lock = SPIN_LOCK_UNLOCKED;
562 int __init init_mtdblock(void)
564 spin_lock_init(&mtdblks_lock);
566 if (register_blkdev(MAJOR_NR, DEVICE_NAME))
567 return -EAGAIN;
569 #ifdef CONFIG_DEVFS_FS
570 devfs_mk_dir(DEVICE_NAME);
571 #endif
572 register_mtd_user(&notifier);
574 init_waitqueue_head(&thr_wq);
575 blk_init_queue(&mtd_queue, &mtdblock_request, &mtddev_lock);
576 kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
577 return 0;
580 static void __exit cleanup_mtdblock(void)
582 leaving = 1;
583 wake_up(&thr_wq);
584 down(&thread_sem);
585 unregister_mtd_user(&notifier);
586 #ifdef CONFIG_DEVFS_FS
587 devfs_remove(DEVICE_NAME);
588 #endif
589 unregister_blkdev(MAJOR_NR,DEVICE_NAME);
590 blk_cleanup_queue(&mtd_queue);
593 module_init(init_mtdblock);
594 module_exit(cleanup_mtdblock);
597 MODULE_LICENSE("GPL");
598 MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
599 MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");