#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#define PART_BITS 4	/* bits of the minor number used per disk */

static int major, index;
struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* What host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
};
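/*
 * Buffer layout of a single request as do_req() hands it to the host,
 * in scatterlist order: out_hdr, then (for packet commands) the command
 * block, then the data segments mapped by blk_rq_map_sg(), then (for
 * packet commands) the sense buffer and in_hdr, and finally the
 * one-byte status that the host fills in on completion.
 */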
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int error;

		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			error = -ENOTTY;
			break;
		default:
			error = -EIO;
			break;
		}

		if (blk_pc_request(vbr->req)) {
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
		}

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_BLOCK_PC:
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_LINUX_BLOCK:
		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		}
		/*FALLTHRU*/
	default:
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (blk_pc_request(vbr->req))
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (rq_data_dir(vbr->req) == WRITE) {
		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		out += num;
	} else {
		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
		in += num;
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
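/*
 * Locking note: blk_init_queue() in virtblk_probe() registers
 * vblk->lock as the queue lock, so the block layer already holds it
 * around do_virtblk_request() and do_req(), and blk_done() takes the
 * same lock; vblk->sg is therefore never built and drained concurrently.
 */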
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

	while ((req = blk_peek_request(q)) != NULL) {
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}

		blk_start_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}
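/*
 * Installed below via blk_queue_ordered(): the block layer uses this
 * to turn its flush requests into REQ_LB_OP_FLUSH commands, which
 * do_req() then maps to VIRTIO_BLK_T_FLUSH.
 */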
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);
	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}
static const struct block_device_operations virtblk_fops = {
	.locked_ioctl = virtblk_ioctl,
	.owner	      = THIS_MODULE,
	.getgeo	      = virtblk_getgeo,
};
static int index_to_minor(int index)
{
	return index << PART_BITS;
}
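/*
 * Each disk thus spans 1 << PART_BITS minors: one for the whole disk
 * and the rest for partitions (16 total with PART_BITS of 4 above).
 */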
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err;
	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
	if (err)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}
	q->queuedata = vblk;
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;

		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}
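	/*
	 * Disk names run vda..vdz, then vdaa..vdzz, then three letters,
	 * the same base-26 scheme the sd driver uses for its names.
	 */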
	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;
	/* If barriers are supported, tell block layer that queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);
	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);
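	/* The capacity above is in 512-byte sectors (per the virtio spec),
	 * which is also the unit set_capacity() expects. */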
	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);
	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);
	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);
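	/* All the topology values above are expressed in units of logical
	 * blocks, hence the multiplications by blk_size. */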
	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
}
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};
/*
 * virtio_blk causes a spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= virtblk_probe,
	.remove			= __devexit_p(virtblk_remove),
};
static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}
static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");