drivers/mtd/mtd_blkdevs.c
/*
 * $Id: mtd_blkdevs.c,v 1.24 2004/11/16 18:28:59 dwmw2 Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
static LIST_HEAD(blktrans_majors);

extern struct semaphore mtd_table_mutex;
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
	struct request_queue *rq;
	spinlock_t queue_lock;
};
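
/* Service a single request in thread context, one 512-byte sector at a
 * time, through the translation layer's readsect()/writesect() hooks.
 * Returns 1 on success and 0 on failure -- the 'uptodate' convention
 * that end_request() expects. */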
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector;
	nsect = req->current_nr_sectors;
	buf = req->buffer;

	if (!(req->flags & REQ_CMD))
		return 0;

	if (block + nsect > get_capacity(req->rq_disk))
		return 0;

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
		return 0;
	}
}
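
/* Per-translation-layer worker thread: sleeps on thread_wq until the
 * block layer queues work, then services requests with the device
 * semaphore held. All actual MTD I/O happens here, never in the
 * request function itself. */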
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_lock_irq(rq->queue_lock);

	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
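
/* The block layer's request function. It runs with queue_lock held and
 * may be called from atomic context, so it only pokes the worker thread
 * rather than touching the (potentially sleeping) MTD device. */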
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;

	wake_up(&tr->blkcore_priv->thread_wq);
}
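
/* open()/release() pin both the underlying MTD driver's module and the
 * translation layer's module, and bump mtd->usecount, so neither can
 * vanish while the block device is held open. */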
static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}
static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}
static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;

	case HDIO_GETGEO:
		if (tr->getgeo) {
			struct hd_geometry g;
			int ret;

			memset(&g, 0, sizeof(g));
			ret = tr->getgeo(dev, &g);
			if (ret)
				return ret;

			g.start = get_start_sect(inode->i_bdev);
			if (copy_to_user((void __user *)arg, &g, sizeof(g)))
				return -EFAULT;
			return 0;
		}
		/* else fall through */
	default:
		return -ENOTTY;
	}
}
struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
};
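
/* Register one block device for a translation layer. The caller must
 * already hold mtd_table_mutex; the down_trylock()/BUG() pair below
 * asserts exactly that. A devnum of -1 asks for the first free slot. */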
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (!down_trylock(&mtd_table_mutex)) {
		up(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);

		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256)
		return -EBUSY;

	init_MUTEX(&new->sem);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	snprintf(gd->disk_name, sizeof(gd->disk_name),
		 "%s%c", tr->name, (tr->part_bits ? 'a' : '0') + new->devnum);
	snprintf(gd->devfs_name, sizeof(gd->devfs_name),
		 "%s/%c", tr->name, (tr->part_bits ? 'a' : '0') + new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * new->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}
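
/* Tear down one device. Like add_mtd_blktrans_dev(), this must be
 * called with mtd_table_mutex held. */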
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (!down_trylock(&mtd_table_mutex)) {
		up(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}
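
/* MTD notifier callbacks: fan device add/remove events out to every
 * registered translation layer so it can create or destroy its block
 * devices accordingly. */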
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
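
/* Register a translation layer: claim its block major, set up the
 * shared request queue and worker thread, then attach every MTD
 * device already present in mtd_table. */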
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, so that link/init ordering can't bite us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));

	down(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);
	init_completion(&tr->blkcore_priv->thread_dead);
	init_waitqueue_head(&tr->blkcore_priv->thread_wq);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;

	ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		up(&mtd_table_mutex);
		return ret;
	}

	devfs_mk_dir(tr->name);

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	up(&mtd_table_mutex);

	return 0;
}
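
/* Undo register_mtd_blktrans(): stop the worker thread, remove every
 * remaining device, then release the queue and the block major. */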
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	down(&mtd_table_mutex);

	/* Clean up the kernel thread */
	tr->blkcore_priv->exiting = 1;
	wake_up(&tr->blkcore_priv->thread_wq);
	wait_for_completion(&tr->blkcore_priv->thread_dead);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);

		tr->remove_dev(dev);
	}

	devfs_remove(tr->name);
	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	up(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	if (!list_empty(&tr->devs))
		BUG();

	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
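
Example: the simplest client of this interface is a read-only translation layer in the style of the in-tree mtdblock_ro driver of the same kernel era. The sketch below is illustrative and not part of mtd_blkdevs.c; the "myflash" names and the major number are invented for the example, and it assumes the 2.6-era mtd->read() method and the blktrans structures used above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

/* Copy one 512-byte sector out of the flash. A nonzero return means
 * failure, matching what do_blktrans_request() expects. */
static int myflash_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
		return 1;
	return 0;
}

/* Called (under mtd_table_mutex) for each MTD device that appears. */
static void myflash_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;
	memset(dev, 0, sizeof(*dev));

	dev->mtd = mtd;
	dev->devnum = mtd->index;
	dev->blksize = 512;
	dev->size = mtd->size >> 9;	/* capacity in blksize units */
	dev->tr = tr;

	add_mtd_blktrans_dev(dev);	/* no writesect, so it comes up read-only */
}

static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
	kfree(dev);
}

static struct mtd_blktrans_ops myflash_tr = {
	.name		= "myflash",
	.major		= 240,	/* hypothetical: 240-254 are reserved for local use */
	.part_bits	= 0,
	.readsect	= myflash_readsect,
	.add_mtd	= myflash_add_mtd,
	.remove_dev	= myflash_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init myflash_init(void)
{
	return register_mtd_blktrans(&myflash_tr);
}

static void __exit myflash_exit(void)
{
	deregister_mtd_blktrans(&myflash_tr);
}

module_init(myflash_init);
module_exit(myflash_exit);
MODULE_LICENSE("GPL");

Registering the ops struct is all a translation layer has to do: the core above allocates the request queue and worker thread, and the notifier then calls add_mtd() for every present and future MTD device.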