MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / mtd / mtd_blkdevs.c
/*
 * $Id: mtd_blkdevs.c,v 1.22 2004/07/12 12:35:28 dwmw2 Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
static LIST_HEAD(blktrans_majors);

extern struct semaphore mtd_table_mutex;
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
	struct request_queue *rq;
	spinlock_t queue_lock;
};
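/*
 * Handle one block-layer request: break it into 512-byte sector
 * transfers through the translation layer's readsect()/writesect()
 * hooks. Returns 1 on success and 0 on failure, which end_request()
 * takes as its 'uptodate' argument.
 */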
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector;
	nsect = req->current_nr_sectors;
	buf = req->buffer;

	if (!(req->flags & REQ_CMD))
		return 0;

	if (block + nsect > get_capacity(req->rq_disk))
		return 0;

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
		return 0;
	}
}
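/*
 * One kernel thread per registered translation layer. It sleeps on
 * thread_wq until mtd_blktrans_request() wakes it, then drains the
 * request queue, serializing access to each device with dev->sem.
 */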
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_lock_irq(rq->queue_lock);

	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
#if 1 // mask by Victor Yu. 05-14-2007
			set_current_state(TASK_INTERRUPTIBLE);
#endif
			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
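/* The queue's request_fn: all real work happens in the thread, so just wake it. */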
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up(&tr->blkcore_priv->thread_wq);
}
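/*
 * Opening a translated device pins both the underlying MTD driver and
 * the translation layer module before calling the layer's own open()
 * hook; the error path below unwinds those references.
 */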
int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}
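/* Drop the references taken in blktrans_open() once the layer's
   release() hook, if any, succeeds. */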
int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}
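/*
 * Only two ioctls are handled here: BLKFLSBUF and HDIO_GETGEO, both
 * delegated to the translation layer's optional flush()/getgeo() hooks.
 */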
static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;

	case HDIO_GETGEO:
		if (tr->getgeo) {
			struct hd_geometry g;
			int ret;

			memset(&g, 0, sizeof(g));
			ret = tr->getgeo(dev, &g);
			if (ret)
				return ret;

			g.start = get_start_sect(inode->i_bdev);
			if (copy_to_user((void __user *)arg, &g, sizeof(g)))
				return -EFAULT;
			return 0;
		} /* else */
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_MAGIC_ROM_PTR
int blktrans_romptr(struct block_device *bdev, struct vm_area_struct *vma)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;

	dev = bdev->bd_disk->private_data;
	tr = dev->tr;
	/* FIXME: I don't know at which level we must << PAGE_SHIFT */
	vma->vm_pgoff <<= PAGE_SHIFT;
	if (!tr->romptr || tr->romptr(dev, vma))
		return -ENOSYS;
	return 0;
}
#endif
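/* Block device operations shared by every translation-layer disk. */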
struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
#ifdef CONFIG_MAGIC_ROM_PTR
	.romptr		= blktrans_romptr,
#endif
};
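/*
 * Add one translated device. Expects mtd_table_mutex to be held by the
 * caller (hence the down_trylock() BUG() check): pick a device number,
 * set up the gendisk and make it live with add_disk().
 */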
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (!down_trylock(&mtd_table_mutex)) {
		up(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	init_MUTEX(&new->sem);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	snprintf(gd->disk_name, sizeof(gd->disk_name),
		 "%s%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
	snprintf(gd->devfs_name, sizeof(gd->devfs_name),
		 "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * new->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}
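/* Counterpart of add_mtd_blktrans_dev(); likewise expects mtd_table_mutex
   to be held. Unlinks the device and destroys its gendisk. */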
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (!down_trylock(&mtd_table_mutex)) {
		up(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}
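/*
 * MTD core notifier hooks: propagate device removal/arrival to every
 * registered translation layer.
 */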
void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}
void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
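/*
 * Entry point for a translation layer driver: register the block major,
 * set up the shared request queue and worker thread, then offer every
 * existing MTD device to the new layer.
 */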
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));

	down(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);
	init_completion(&tr->blkcore_priv->thread_dead);
	init_waitqueue_head(&tr->blkcore_priv->thread_wq);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;

	ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}

	devfs_mk_dir(tr->name);

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	up(&mtd_table_mutex);

	return 0;
}
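/*
 * Tear down everything register_mtd_blktrans() set up: stop the worker
 * thread, remove the layer's devices, then release the queue and major.
 */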
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	down(&mtd_table_mutex);

	/* Clean up the kernel thread */
	tr->blkcore_priv->exiting = 1;
	wake_up(&tr->blkcore_priv->thread_wq);
	wait_for_completion(&tr->blkcore_priv->thread_dead);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	devfs_remove(tr->name);
	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	up(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	if (!list_empty(&tr->devs))
		BUG();
	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
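Usage sketch (not part of the file above): a minimal read-only translation layer in the style of 2.6.9's mtdblock_ro, showing how a driver would plug into this interface. The "sketch" names and the major number are hypothetical.

/* Hypothetical example driver -- illustrative only, 2.6.9-era APIs assumed. */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>

static int sketch_readsect(struct mtd_blktrans_dev *dev,
			   unsigned long block, char *buf)
{
	size_t retlen;

	/* One 512-byte sector straight from the underlying MTD device;
	   a nonzero return tells do_blktrans_request() the transfer failed. */
	if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
		return 1;
	return 0;
}

static void sketch_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;
	memset(dev, 0, sizeof(*dev));

	dev->mtd = mtd;
	dev->devnum = -1;		/* let the core pick the first free number */
	dev->size = mtd->size >> 9;	/* capacity in 512-byte sectors */
	dev->blksize = 512;
	dev->tr = tr;
	dev->readonly = 1;		/* no writesect, so read-only anyway */

	add_mtd_blktrans_dev(dev);
}

static void sketch_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
	kfree(dev);
}

static struct mtd_blktrans_ops sketch_tr = {
	.name		= "sketchblk",	/* hypothetical device name */
	.major		= 240,		/* hypothetical; a fixed major is needed, since
					   register_mtd_blktrans() treats any nonzero
					   register_blkdev() return as failure */
	.part_bits	= 0,
	.readsect	= sketch_readsect,
	.add_mtd	= sketch_add_mtd,
	.remove_dev	= sketch_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init sketch_init(void)
{
	return register_mtd_blktrans(&sketch_tr);
}

static void __exit sketch_exit(void)
{
	deregister_mtd_blktrans(&sketch_tr);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");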