/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>

#include "aoe.h"
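
/*
 * aoeblk_mutex is the private mutex that replaced the big kernel lock
 * here ("block: autoconvert trivial BKL users to private mutex"); in
 * this file it is taken only around the open path below.
 */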
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
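
/*
 * The sysfs "show" methods below back the attribute group registered in
 * aoedisk_add_sysfs(); they report device state, the first target's MAC
 * address, the network interfaces in use, and the firmware version.
 */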
static ssize_t aoedisk_show_state(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;

        return snprintf(page, PAGE_SIZE,
                        "%s%s\n",
                        (d->flags & DEVFL_UP) ? "up" : "down",
                        (d->flags & DEVFL_KICKME) ? ",kickme" :
                        (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
        /* I'd rather see nopen exported so we can ditch closewait */
}

static ssize_t aoedisk_show_mac(struct device *dev,
                                struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;
        struct aoetgt *t = d->targets[0];

        if (t == NULL)
                return snprintf(page, PAGE_SIZE, "none\n");
        return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
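
/*
 * List the local network interfaces currently usable for this device:
 * walk every target's interface table, skip duplicates, and print the
 * unique interface names as a comma-separated list.
 */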
static ssize_t aoedisk_show_netif(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;
        struct net_device *nds[8], **nd, **nnd, **ne;
        struct aoetgt **t, **te;
        struct aoeif *ifp, *e;
        char *p;

        memset(nds, 0, sizeof nds);
        nd = nds;
        ne = nd + ARRAY_SIZE(nds);
        t = d->targets;
        te = t + NTARGETS;
        for (; t < te && *t; t++) {
                ifp = (*t)->ifs;
                e = ifp + NAOEIFS;
                for (; ifp < e && ifp->nd; ifp++) {
                        for (nnd = nds; nnd < nd; nnd++)
                                if (*nnd == ifp->nd)
                                        break;
                        if (nnd == nd && nd != ne)
                                *nd++ = ifp->nd;
                }
        }

        ne = nd;
        nd = nds;
        if (*nd == NULL)
                return snprintf(page, PAGE_SIZE, "none\n");
        for (p = page; nd < ne; nd++)
                p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
                        p == page ? "" : ",", (*nd)->name);
        p += snprintf(p, PAGE_SIZE - (p-page), "\n");
        return p-page;
}

/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;

        return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}

static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
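/*
 * "firmware-version" contains a '-', which DEVICE_ATTR() cannot turn
 * into a valid C identifier, so this attribute is spelled out by hand.
 */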
static struct device_attribute dev_attr_firmware_version = {
        .attr = { .name = "firmware-version", .mode = S_IRUGO },
        .show = aoedisk_show_fwver,
};

static struct attribute *aoe_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_mac.attr,
        &dev_attr_netif.attr,
        &dev_attr_firmware_version.attr,
        NULL,
};

static const struct attribute_group attr_group = {
        .attrs = aoe_attrs,
};

static int
aoedisk_add_sysfs(struct aoedev *d)
{
        return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

void
aoedisk_rm_sysfs(struct aoedev *d)
{
        sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
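
/*
 * Open counts a new user in d->nopen, but only while the device is
 * marked DEVFL_UP; otherwise the open fails with -ENODEV.
 */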
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
        struct aoedev *d = bdev->bd_disk->private_data;
        ulong flags;

        mutex_lock(&aoeblk_mutex);
        spin_lock_irqsave(&d->lock, flags);
        if (d->flags & DEVFL_UP) {
                d->nopen++;
                spin_unlock_irqrestore(&d->lock, flags);
                mutex_unlock(&aoeblk_mutex);
                return 0;
        }
        spin_unlock_irqrestore(&d->lock, flags);
        mutex_unlock(&aoeblk_mutex);
        return -ENODEV;
}
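
/*
 * On the last close, drop the device lock and send a fresh AoE config
 * query (aoecmd_cfg) so the target can be re-probed.
 */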
static int
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
        struct aoedev *d = disk->private_data;
        ulong flags;

        spin_lock_irqsave(&d->lock, flags);

        if (--d->nopen == 0) {
                spin_unlock_irqrestore(&d->lock, flags);
                aoecmd_cfg(d->aoemajor, d->aoeminor);
                return 0;
        }
        spin_unlock_irqrestore(&d->lock, flags);

        return 0;
}
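
/*
 * Bio-based I/O entry point: the driver bypasses the request queue's
 * elevator.  Each incoming bio is wrapped in a struct buf from the
 * per-device mempool and queued for the AoE command layer to turn into
 * network frames.
 */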
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
        struct sk_buff_head queue;
        struct aoedev *d;
        struct buf *buf;
        ulong flags;

        blk_queue_bounce(q, &bio);

        if (bio == NULL) {
                printk(KERN_ERR "aoe: bio is NULL\n");
                BUG();
                return 0;
        }
        d = bio->bi_bdev->bd_disk->private_data;
        if (d == NULL) {
                printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
                return 0;
        } else if (bio->bi_rw & REQ_HARDBARRIER) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        } else if (bio->bi_io_vec == NULL) {
                printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
                return 0;
        }
        buf = mempool_alloc(d->bufpool, GFP_NOIO);
        if (buf == NULL) {
                printk(KERN_INFO "aoe: buf allocation failure\n");
                bio_endio(bio, -ENOMEM);
                return 0;
        }
        memset(buf, 0, sizeof(*buf));
        INIT_LIST_HEAD(&buf->bufs);
        buf->stime = jiffies;
        buf->bio = bio;
        buf->resid = bio->bi_size;
        buf->sector = bio->bi_sector;
        buf->bv = &bio->bi_io_vec[bio->bi_idx];
        buf->bv_resid = buf->bv->bv_len;
        WARN_ON(buf->bv_resid == 0);
        buf->bv_off = buf->bv->bv_offset;
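
        /*
         * Under the device lock: fail if the device is down, otherwise
         * queue the buf, let aoecmd_work() build frames onto d->sendq,
         * then splice them off so they can be transmitted after the
         * lock is dropped.
         */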
        spin_lock_irqsave(&d->lock, flags);

        if ((d->flags & DEVFL_UP) == 0) {
                printk(KERN_INFO "aoe: device %ld.%d is not up\n",
                        d->aoemajor, d->aoeminor);
                spin_unlock_irqrestore(&d->lock, flags);
                mempool_free(buf, d->bufpool);
                bio_endio(bio, -ENXIO);
                return 0;
        }

        list_add_tail(&buf->bufs, &d->bufq);

        aoecmd_work(d);
        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&d->sendq, &queue);

        spin_unlock_irqrestore(&d->lock, flags);
        aoenet_xmit(&queue);

        return 0;
}
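
/*
 * Report the device's cached CHS geometry; fails with -ENODEV while the
 * device is not up.
 */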
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct aoedev *d = bdev->bd_disk->private_data;

        if ((d->flags & DEVFL_UP) == 0) {
                printk(KERN_ERR "aoe: disk not up\n");
                return -ENODEV;
        }

        geo->cylinders = d->geo.cylinders;
        geo->heads = d->geo.heads;
        geo->sectors = d->geo.sectors;
        return 0;
}

static const struct block_device_operations aoe_bdops = {
        .open = aoeblk_open,
        .release = aoeblk_release,
        .getgeo = aoeblk_getgeo,
        .owner = THIS_MODULE,
};
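
/*
 * Late allocation of the gendisk, buf mempool, and bio-based queue for a
 * newly discovered AoE device.  This runs from process context (deferred
 * work scheduled by the AoE command code), since the calls below can
 * sleep.
 */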
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
        struct aoedev *d = vp;
        struct gendisk *gd;
        ulong flags;

        gd = alloc_disk(AOE_PARTITIONS);
        if (gd == NULL) {
                printk(KERN_ERR
                        "aoe: cannot allocate disk structure for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                goto err;
        }

        d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
        if (d->bufpool == NULL) {
                printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                goto err_disk;
        }

        d->blkq = blk_alloc_queue(GFP_KERNEL);
        if (!d->blkq)
                goto err_mempool;
        blk_queue_make_request(d->blkq, aoeblk_make_request);
        d->blkq->backing_dev_info.name = "aoe";
        if (bdi_init(&d->blkq->backing_dev_info))
                goto err_blkq;
        spin_lock_irqsave(&d->lock, flags);
        gd->major = AOE_MAJOR;
        gd->first_minor = d->sysminor * AOE_PARTITIONS;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
        set_capacity(gd, d->ssize);
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);

        gd->queue = d->blkq;
        d->gd = gd;
        d->flags &= ~DEVFL_GDALLOC;
        d->flags |= DEVFL_UP;

        spin_unlock_irqrestore(&d->lock, flags);

        add_disk(gd);
        aoedisk_add_sysfs(d);
        return;
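
        /*
         * Error unwinding in reverse order of allocation; clearing
         * DEVFL_GDALLOC lets a later attempt retry the setup.
         */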
err_blkq:
        blk_cleanup_queue(d->blkq);
        d->blkq = NULL;
err_mempool:
        mempool_destroy(d->bufpool);
err_disk:
        put_disk(gd);
err:
        spin_lock_irqsave(&d->lock, flags);
        d->flags &= ~DEVFL_GDALLOC;
        spin_unlock_irqrestore(&d->lock, flags);
}

void
aoeblk_exit(void)
{
        kmem_cache_destroy(buf_pool_cache);
}
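
/*
 * Module init: create the slab cache that backs every device's buf
 * mempool; the per-device pools themselves are created later in
 * aoeblk_gdalloc().
 */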
int __init
aoeblk_init(void)
{
        buf_pool_cache = kmem_cache_create("aoe_bufs",
                                           sizeof(struct buf),
                                           0, 0, NULL);
        if (buf_pool_cache == NULL)
                return -ENOMEM;

        return 0;
}