/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include "aoe.h"

static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;

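/*
 * The sysfs "show" helpers below report per-device state.  Each one
 * looks up the aoedev from the gendisk's private_data and formats a
 * single attribute into the caller-supplied page buffer: link state
 * (up/down plus kickme/closewait modifiers), the MAC address of the
 * first target, the network interfaces in use, and the firmware
 * version reported by the target.
 */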
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}

static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = S_IRUGO },
	.show = aoedisk_show_fwver,
};

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = aoe_attrs,
};

static int
aoedisk_add_sysfs(struct aoedev *d)
{
	return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
	sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

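/*
 * Illustrative example (not part of this file): once aoedisk_add_sysfs()
 * has run, the attributes can be read from the disk's sysfs directory,
 * e.g. for device e0.0:
 *
 *   cat /sys/block/etherd!e0.0/state             # "up" or "down"
 *   cat /sys/block/etherd!e0.0/mac
 *   cat /sys/block/etherd!e0.0/netif
 *   cat /sys/block/etherd!e0.0/firmware-version
 *
 * The '!' comes from sysfs replacing '/' in the "etherd/eX.Y" disk name;
 * the exact path may vary by kernel version.
 */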
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

static int
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

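/*
 * aoeblk_make_request() is the bio entry point for the driver: it wraps
 * the incoming bio in a struct buf from the per-device mempool, queues
 * it on d->bufq under the device lock, and then splices any frames the
 * command layer has staged on d->sendq onto a private list so they can
 * be transmitted after the lock is dropped.
 */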
static void
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		return;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		bio_endio(bio, -ENXIO);
		return;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		bio_endio(bio, -ENXIO);
		return;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);
}

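/*
 * aoeblk_getgeo() backs the HDIO_GETGEO ioctl, so partitioning tools can
 * read a CHS geometry even though AoE itself only addresses by LBA; the
 * values come from d->geo, which the command layer fills in elsewhere.
 */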
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

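/*
 * aoeblk_gdalloc() brings a discovered device the rest of the way up:
 * it allocates the gendisk, the buf mempool, and the request queue,
 * fills in the "etherd/eX.Y" disk name and capacity, marks the device
 * DEVFL_UP, and registers the disk and its sysfs attributes.  On
 * failure it unwinds whatever was allocated and clears DEVFL_GDALLOC.
 */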
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR
			"aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	d->blkq = blk_alloc_queue(GFP_KERNEL);
	if (!d->blkq)
		goto err_mempool;
	blk_queue_make_request(d->blkq, aoeblk_make_request);
	d->blkq->backing_dev_info.name = "aoe";
	if (bdi_init(&d->blkq->backing_dev_info))
		goto err_blkq;
	spin_lock_irqsave(&d->lock, flags);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	gd->queue = d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	return;

err_blkq:
	blk_cleanup_queue(d->blkq);
	d->blkq = NULL;
err_mempool:
	mempool_destroy(d->bufpool);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GDALLOC;
	spin_unlock_irqrestore(&d->lock, flags);
}

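/*
 * Module-level setup: aoeblk_init() creates the "aoe_bufs" slab cache
 * that backs each device's buf mempool, and aoeblk_exit() destroys it
 * on unload.
 */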
void
aoeblk_exit(void)
{
	kmem_cache_destroy(buf_pool_cache);
}

int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;

	return 0;
}