[linux-2.6/x86.git] / drivers/block/virtio_blk.c
blob 2138a7ae050c10c44bdba1608f441a7811ca1a22

//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>

#define PART_BITS 4

static int major, index;

struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* What the host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
};
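
/*
 * Virtqueue callback: runs when the host reports completed requests.
 * Under vblk->lock it pops each finished virtblk_req, translates the
 * status byte into an errno, completes the block-layer request and
 * returns the tracking structure to the mempool.
 */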
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int error;

		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			error = -ENOTTY;
			break;
		default:
			error = -EIO;
			break;
		}

		if (blk_pc_request(vbr->req)) {
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
		}

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}
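
/*
 * Map one request into vblk->sg and post it to the virtqueue.  The
 * layout is: out_hdr, then (for SCSI packet commands) the command
 * block, then the data segments, then sense buffer and in_hdr for
 * packet commands, and finally the status byte the host writes back.
 */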
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_BLOCK_PC:
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_LINUX_BLOCK:
		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		}
		/*FALLTHRU*/
	default:
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the SCSI command
	 * block, and before the status byte we put the sense data and an
	 * inhdr carrying additional status information.
	 */
	if (blk_pc_request(vbr->req))
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
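
/*
 * Block-layer request_fn, called with vblk->lock (the queue lock) held.
 * It dispatches as many queued requests as the virtqueue will take,
 * stops the queue when it fills up, and kicks the host once at the end.
 */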
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

	while ((req = blk_peek_request(q)) != NULL) {
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blk_start_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}
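
/*
 * prepare_flush_fn for blk_queue_ordered(): rewrites the barrier's
 * flush request as a REQ_LB_OP_FLUSH command, which do_req() then
 * turns into a VIRTIO_BLK_T_FLUSH operation for the host.
 */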
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host supports them.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.locked_ioctl = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}
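
/*
 * Probe: allocate the per-device state and scatterlist, find the single
 * "requests" virtqueue, create the gendisk and request queue, then apply
 * the limits and topology hints the host advertises before adding the disk.
 */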
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err;
	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
	if (err)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	q->queuedata = vblk;
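
	/* Disks are named vda..vdz, then vdaa..vdzz, then vdaaa and so on. */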
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;

		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;

	/* If barriers are supported, tell block layer that queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);
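	/* sg_elems includes the two slots reserved for the request header
	 * and status trailer, so only sg_elems - 2 segments carry data. */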

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);
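
	/* add_disk() makes the disk live and scans its partitions, so it is
	 * the last step once the queue limits are fully configured. */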
	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
}

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};

/*
 * virtio_blk causes a spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = __devexit_p(virtblk_remove),
};
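
/* Passing 0 to register_blkdev() asks the block layer to allocate an
 * unused major number for the "virtblk" devices registered below. */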
static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");