drivers/mtd/mtdblock.c
/*
 * Direct MTD block device access
 *
 * $Id: mtdblock.c,v 1.47 2001/10/02 15:05:11 dwmw2 Exp $
 *
 * 02-nov-2000	Nicolas Pitre		Added read-modify-write with cache
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
#define DEVICE_NR(device) (device)
#define LOCAL_END_REQUEST
#include <linux/blk.h>
/* for old kernels... */
#ifndef QUEUE_EMPTY
#define QUEUE_EMPTY  (!CURRENT)
#endif
#if LINUX_VERSION_CODE < 0x20300
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].plug_tq.sync)
#else
#define QUEUE_PLUGGED (blk_queue_plugged(QUEUE))
#endif

#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>

static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);

static struct mtd_notifier notifier = {
	mtd_notify_add,
	mtd_notify_remove,
	NULL
};

static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
#endif

static struct mtdblk_dev {
	struct mtd_info *mtd; /* Locked */
	int count;
	struct semaphore cache_sem;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];
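
/*
 * mtdblks[] above is indexed by minor number and protected by mtdblks_lock
 * below.  mtd_sizes[] holds each device's size in kilobytes; it is hooked
 * into blk_size[MAJOR_NR] at init time and filled in at open() time.
 */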
static spinlock_t mtdblks_lock;

static int mtd_sizes[MAX_MTD_DEVICES];

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */

static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}
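
/*
 * Erase the flash region at 'pos' and then write 'len' bytes from 'buf'
 * into it.  The MTD erase call is asynchronous, so we sleep on a local
 * wait queue until erase_callback() above wakes us when the erase is done.
 */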
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = MTD_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */

	ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}
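
/*
 * Flush the cached sector back to flash if it has been modified.
 * Callers hold mtdblk->cache_sem.
 */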
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
			"at 0x%lx, size 0x%x\n", mtd->name,
			mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means.  Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 */
	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}
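
/*
 * Write 'len' bytes at 'pos' through the sector cache.  Writes covering a
 * whole erase block go straight to erase_write(); partial writes are
 * merged into the cached copy of the enclosing sector and flushed later.
 * Called with mtdblk->cache_sem held.
 */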
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		mtd->name, pos, len);

	if (!sect_size)
		return MTD_WRITE (mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if( size > len )
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = MTD_READ(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
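
/*
 * Read 'len' bytes at 'pos', taking the data from the sector cache when it
 * holds the wanted sector and from flash otherwise.  Called with
 * mtdblk->cache_sem held.
 */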
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	if (!sect_size)
		return MTD_READ (mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached.
		 * Read the requested amount of data from our internal cache if it
		 * contains what we want, otherwise we read the data directly
		 * from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size);
		} else {
			ret = MTD_READ (mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
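
/*
 * Open: look up the MTD device behind this minor and, on first open,
 * allocate the mtdblk_dev structure together with an erase-block-sized
 * cache buffer (skipped for RAM-like devices, which need no
 * read-modify-write).  Later opens just take another reference.
 */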
static int mtdblock_open(struct inode *inode, struct file *file)
{
	struct mtdblk_dev *mtdblk;
	struct mtd_info *mtd;
	int dev;

	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");

	if (!inode)
		return -EINVAL;

	dev = minor(inode->i_rdev);
	if (dev >= MAX_MTD_DEVICES)
		return -EINVAL;

	mtd = get_mtd_device(NULL, dev);
	if (!mtd)
		return -ENODEV;
	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	spin_lock(&mtdblks_lock);

	/* If it's already open, no need to piss about. */
	if (mtdblks[dev]) {
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		return 0;
	}

	/* OK, it's not open. Try to find it */

	/* First we have to drop the lock, because we have to
	   do things which might sleep.
	*/
	spin_unlock(&mtdblks_lock);

	mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
	if (!mtdblk) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	memset(mtdblk, 0, sizeof(*mtdblk));
	mtdblk->count = 1;
	mtdblk->mtd = mtd;

	init_MUTEX (&mtdblk->cache_sem);
	mtdblk->cache_state = STATE_EMPTY;
	if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
	    mtdblk->mtd->erasesize) {
		mtdblk->cache_size = mtdblk->mtd->erasesize;
		mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
		if (!mtdblk->cache_data) {
			put_mtd_device(mtdblk->mtd);
			kfree(mtdblk);
			return -ENOMEM;
		}
	}

	/* OK, we've created a new one. Add it to the list. */

	spin_lock(&mtdblks_lock);

	if (mtdblks[dev]) {
		/* Another CPU made one at the same time as us. */
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
		return 0;
	}

	mtdblks[dev] = mtdblk;
	mtd_sizes[dev] = mtdblk->mtd->size/1024;
	set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));

	spin_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}
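
/*
 * Release: flush any dirty cached data, then drop our reference and free
 * the mtdblk_dev structure when the last opener goes away.
 */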
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;
	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	invalidate_device(inode->i_rdev, 1);

	dev = minor(inode->i_rdev);
	mtdblk = mtdblks[dev];

	down(&mtdblk->cache_sem);
	write_cached_data(mtdblk);
	up(&mtdblk->cache_sem);

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	release_return(0);
}

/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller.  The queue_lock
 * is held upon entry and exit.
 * The head of our request queue is considered active so there is no need
 * to dequeue requests before we are done.
 */
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
		spin_unlock_irq(QUEUE->queue_lock);
		mtdblk = mtdblks[minor(req->rq_dev)];
		res = 0;

		if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
			panic(__FUNCTION__": minor out of bound");

		if (!(req->flags & REQ_CMD))
			goto end_req;

		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		// Handle the request
		switch (rq_data_dir(CURRENT)) {
			int err;

			case READ:
			down(&mtdblk->cache_sem);
			err = do_cached_read (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

			case WRITE:
			// Read only device
			if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
				break;

			// Do the write
			down(&mtdblk->cache_sem);
			err = do_cached_write (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}

end_req:
		spin_lock_irq(QUEUE->queue_lock);
		if (!end_that_request_first(req, res, req->hard_cur_sectors)) {
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
	}
}
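
/*
 * mtdblockd: the requests queued by mtdblock_request() below are serviced
 * by this kernel thread, so the flash erase/write paths are free to sleep.
 * 'thr_wq' is what the thread sleeps on, 'leaving' asks it to exit and
 * 'thread_sem' lets cleanup_mtdblock() wait for it to finish.
 */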
static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);

int mtdblock_thread(void *dummy)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	tsk->flags |= PF_MEMALLOC;
	strcpy(tsk->comm, "mtdblockd");
	spin_lock_irq(&tsk->sigmask_lock);
	sigfillset(&tsk->blocked);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sigmask_lock);
	daemonize();

	while (!leaving) {
		add_wait_queue(&thr_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(QUEUE->queue_lock);
		if (QUEUE_EMPTY || QUEUE_PLUGGED) {
			spin_unlock_irq(QUEUE->queue_lock);
			schedule();
			remove_wait_queue(&thr_wq, &wait);
		} else {
			remove_wait_queue(&thr_wq, &wait);
			set_current_state(TASK_RUNNING);
			handle_mtdblock_request();
			spin_unlock_irq(QUEUE->queue_lock);
		}
	}

	up(&thread_sem);
	return 0;
}

#if LINUX_VERSION_CODE < 0x20300
#define RQFUNC_ARG void
#else
#define RQFUNC_ARG request_queue_t *q
#endif

static void mtdblock_request(RQFUNC_ARG)
{
	/* Don't do anything, except wake the thread if necessary */
	wake_up(&thr_wq);
}
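
/*
 * ioctl: BLKGETSIZE/BLKGETSIZE64 report the device size; BLKFLSBUF flushes
 * the buffer cache, our sector cache and finally the MTD device itself.
 */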
static int mtdblock_ioctl(struct inode * inode, struct file * file,
		      unsigned int cmd, unsigned long arg)
{
	struct mtdblk_dev *mtdblk;

	mtdblk = mtdblks[minor(inode->i_rdev)];

#ifdef PARANOIA
	if (!mtdblk)
		BUG();
#endif

	switch (cmd) {
	case BLKGETSIZE:   /* Return device size */
		return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
	case BLKGETSIZE64:
		return put_user((u64)mtdblk->mtd->size, (u64 *)arg);

	case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		if(!capable(CAP_SYS_ADMIN))
			return -EACCES;
#endif
		fsync_bdev(inode->i_bdev);
		invalidate_bdev(inode->i_bdev, 0);
		down(&mtdblk->cache_sem);
		write_cached_data(mtdblk);
		up(&mtdblk->cache_sem);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		return 0;

	default:
		return -EINVAL;
	}
}

#if LINUX_VERSION_CODE < 0x20326
static struct file_operations mtd_fops =
{
	open: mtdblock_open,
	ioctl: mtdblock_ioctl,
	release: mtdblock_release,
	read: block_read,
	write: block_write
};
#else
static struct block_device_operations mtd_fops =
{
	owner: THIS_MODULE,
	open: mtdblock_open,
	release: mtdblock_release,
	ioctl: mtdblock_ioctl
};
#endif

#ifdef CONFIG_DEVFS_FS
/* Notification that a new device has been added. Create the devfs entry for
 * it. */

static void mtd_notify_add(struct mtd_info* mtd)
{
	char name[8];

	if (!mtd || mtd->type == MTD_ABSENT)
		return;

	sprintf(name, "%d", mtd->index);
	devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
			DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
			S_IFBLK | S_IRUGO | S_IWUGO,
			&mtd_fops, NULL);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd || mtd->type == MTD_ABSENT)
		return;

	devfs_unregister(devfs_rw_handle[mtd->index]);
}
#endif

static spinlock_t mtddev_lock = SPIN_LOCK_UNLOCKED;
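
/*
 * Module init: register the block major (through devfs when
 * CONFIG_DEVFS_FS is set), initialise the request queue and start the
 * mtdblockd thread.
 */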
int __init init_mtdblock(void)
{
	int i;

	spin_lock_init(&mtdblks_lock);
#ifdef CONFIG_DEVFS_FS
	if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
	{
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
			MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
	register_mtd_user(&notifier);
#else
	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}
#endif

	/* We fill it in at open() time. */
	for (i=0; i< MAX_MTD_DEVICES; i++)
		mtd_sizes[i] = 0;
	init_waitqueue_head(&thr_wq);
	blk_size[MAJOR_NR] = mtd_sizes;

	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtddev_lock);
	kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	return 0;
}

static void __exit cleanup_mtdblock(void)
{
	leaving = 1;
	wake_up(&thr_wq);
	down(&thread_sem);
#ifdef CONFIG_DEVFS_FS
	unregister_mtd_user(&notifier);
	devfs_unregister(devfs_dir_handle);
	devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
	unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
	blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");