/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include "version_info.h"
#include "storvsc_api.h"

#define BLKVSC_MINORS	64
enum blkvsc_device_type {
        UNKNOWN_DEV_TYPE,
        HARDDISK_TYPE,
        DVD_TYPE,
};

enum blkvsc_op_type {
        DO_INQUIRY,
        DO_CAPACITY,
        DO_FLUSH,
};
/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
        int outstanding;
        int status;
        struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
struct blkvsc_request {
        /* blkvsc_request_group.blkvsc_req_list */
        struct list_head req_entry;

        /* block_device_context.pending_list */
        struct list_head pend_entry;

        /* This may be null if we generate a request internally */
        struct request *req;

        struct block_device_context *dev;

        /* The group this request is part of. May be null. */
        struct blkvsc_request_group *group;

        int write;
        sector_t sector_start;
        unsigned long sector_count;

        unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
        unsigned char cmd_len;
        unsigned char cmnd[MAX_COMMAND_SIZE];

        struct hv_storvsc_request request;
};
/* Per device structure */
struct block_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        struct kmem_cache *request_pool;
        spinlock_t lock;
        struct gendisk *gd;
        enum blkvsc_device_type device_type;
        struct list_head pending_list;

        unsigned char device_id[64];
        unsigned int device_id_len;
        int num_outstanding_reqs;
        int shutting_down;
        unsigned int sector_size;
        sector_t capacity;
        unsigned int port;
        unsigned char path;
        unsigned char target;
        int users;
};
static DEFINE_MUTEX(blkvsc_mutex);

static const char *g_blk_driver_name = "blkvsc";
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hv_guid g_blk_device_type = {
        .data = {
                0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
                0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
        }
};
/*
 * There is a circular dependency involving blkvsc_request_completion()
 * and blkvsc_do_request().
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");
/*
 * There is a circular dependency involving blkvsc_probe()
 * and block_ops.
 */
static int blkvsc_probe(struct device *dev);
static int blk_vsc_on_device_add(struct hv_device *device,
                                 void *additional_info)
{
        struct storvsc_device_info *device_info;
        int ret = 0;

        device_info = (struct storvsc_device_info *)additional_info;

        ret = stor_vsc_on_device_add(device, additional_info);
        if (ret != 0)
                return ret;

        /*
         * We need to use the device instance guid to set the path and target
         * id. For IDE devices, the device instance id is formatted as
         * <bus id> * - <device id> - 8899 - 000000000000.
         */
        device_info->path_id = device->dev_instance.data[3] << 24 |
                               device->dev_instance.data[2] << 16 |
                               device->dev_instance.data[1] << 8 |
                               device->dev_instance.data[0];

        device_info->target_id = device->dev_instance.data[5] << 8 |
                                 device->dev_instance.data[4];

        return ret;
}
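/*
 * Worked example with a hypothetical instance GUID: if
 * dev_instance.data[] begins { 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, ... },
 * the shifts above yield path_id = 0x00000000 (IDE controller 0) and
 * target_id = 0x0001 (device 1), i.e. the slave device on the first
 * emulated IDE controller.
 */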
static int blk_vsc_initialize(struct hv_driver *driver)
{
        struct storvsc_driver_object *stor_driver;
        int ret = 0;

        stor_driver = hvdr_to_stordr(driver);

        /* Make sure we are at least 2 pages since 1 page is used for control */

        driver->name = g_blk_driver_name;
        memcpy(&driver->dev_type, &g_blk_device_type, sizeof(struct hv_guid));

        /*
         * Divide the ring buffer data size (which is 1 page less than the ring
         * buffer size since that page is reserved for the ring buffer indices)
         * by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
        stor_driver->max_outstanding_req_per_channel =
                ((stor_driver->ring_buffer_size - PAGE_SIZE) /
                  ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                        sizeof(struct vstor_packet) + sizeof(u64),
                        sizeof(u64)));

        DPRINT_INFO(BLKVSC, "max io outstd %u",
                    stor_driver->max_outstanding_req_per_channel);
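        /*
         * Illustrative arithmetic (assumed sizes, not taken from the
         * headers): with a 64 KiB ring buffer, 4 KiB pages, and a max
         * request size that aligns up to 2 KiB, the division above gives
         * (65536 - 4096) / 2048 = 30 outstanding requests per channel.
         */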
        /* Setup the dispatch table */
        stor_driver->base.dev_add = blk_vsc_on_device_add;
        stor_driver->base.dev_rm = stor_vsc_on_device_remove;
        stor_driver->base.cleanup = stor_vsc_on_cleanup;
        stor_driver->on_io_request = stor_vsc_on_io_request;

        return ret;
}
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
                void (*request_completion)(struct hv_storvsc_request *))
{
        struct block_device_context *blkdev = blkvsc_req->dev;
        struct hv_device *device_ctx = blkdev->device_ctx;
        struct hv_driver *drv =
                drv_to_hv_drv(device_ctx->device.driver);
        struct storvsc_driver_object *storvsc_drv_obj =
                drv->priv;
        struct hv_storvsc_request *storvsc_req;
        struct vmscsi_request *vm_srb;
        int ret;

        storvsc_req = &blkvsc_req->request;
        vm_srb = &storvsc_req->vstor_packet.vm_srb;

        vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

        storvsc_req->on_io_completion = request_completion;
        storvsc_req->context = blkvsc_req;

        vm_srb->port_number = blkdev->port;
        vm_srb->path_id = blkdev->path;
        vm_srb->target_id = blkdev->target;
        vm_srb->lun = 0;	/* this is not really used at all */

        vm_srb->cdb_length = blkvsc_req->cmd_len;

        memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

        storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

        ret = storvsc_drv_obj->on_io_request(blkdev->device_ctx,
                                             &blkvsc_req->request);
        if (ret == 0)
                blkdev->num_outstanding_reqs++;

        return ret;
}
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
        struct block_device_context *blkdev = bdev->bd_disk->private_data;

        mutex_lock(&blkvsc_mutex);
        spin_lock(&blkdev->lock);

        if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
                spin_unlock(&blkdev->lock);
                check_disk_change(bdev);
                spin_lock(&blkdev->lock);
        }

        blkdev->users++;

        spin_unlock(&blkdev->lock);
        mutex_unlock(&blkvsc_mutex);
        return 0;
}
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
        sector_t nsect = get_capacity(bd->bd_disk);
        sector_t cylinders = nsect;

        /*
         * We are making up these values; let us keep it simple.
         */
        hg->heads = 255;
        hg->sectors = 63;
        sector_div(cylinders, hg->heads * hg->sectors);
        hg->cylinders = cylinders;
        if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
                hg->cylinders = 0xffff;
        return 0;
}
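/*
 * Worked example: a 1 GiB disk has nsect = 2097152 sectors, so
 * cylinders = 2097152 / (255 * 63) = 130. The 0xffff clamp in
 * blkvsc_getgeo() never fires here, since
 * (130 + 1) * 255 * 63 = 2104515 >= 2097152; it only caps the value
 * for disks too large for a 16-bit cylinder field.
 */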
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
        blkvsc_req->cmd_len = 16;

        if (rq_data_dir(blkvsc_req->req)) {
                blkvsc_req->write = 1;
                blkvsc_req->cmnd[0] = WRITE_16;
        } else {
                blkvsc_req->write = 0;
                blkvsc_req->cmnd[0] = READ_16;
        }

        blkvsc_req->cmnd[1] |=
                (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

        *(unsigned long long *)&blkvsc_req->cmnd[2] =
                cpu_to_be64(blkvsc_req->sector_start);
        *(unsigned int *)&blkvsc_req->cmnd[10] =
                cpu_to_be32(blkvsc_req->sector_count);
}
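/*
 * Resulting CDB layout (standard SCSI READ(16)/WRITE(16) format):
 *
 *	byte 0       opcode (READ_16 or WRITE_16)
 *	byte 1       flags; bit 3 (0x8) requests FUA (forced unit access)
 *	bytes 2-9    starting LBA, big-endian
 *	bytes 10-13  transfer length in blocks, big-endian
 *	bytes 14-15  group number / control, left at zero here
 */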
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
                        unsigned cmd, unsigned long arg)
{
        struct block_device_context *blkdev = bd->bd_disk->private_data;
        int ret = 0;

        switch (cmd) {
        case HDIO_GET_IDENTITY:
                if (copy_to_user((void __user *)arg, blkdev->device_id,
                                 blkdev->device_id_len))
                        ret = -EFAULT;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
        struct blkvsc_request *blkvsc_req =
                (struct blkvsc_request *)request->context;
        struct block_device_context *blkdev =
                (struct block_device_context *)blkvsc_req->dev;
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;

        vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
        blkdev->num_outstanding_reqs--;

        if (vm_srb->scsi_status)
                if (scsi_normalize_sense(blkvsc_req->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("blkvsc", &sense_hdr);

        complete(&blkvsc_req->request.wait_event);
}
static int blkvsc_do_operation(struct block_device_context *blkdev,
                               enum blkvsc_op_type op)
{
        struct blkvsc_request *blkvsc_req;
        struct page *page_buf;
        unsigned char *buf;
        unsigned char device_type;
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
        int ret = 0;

        blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
        if (!blkvsc_req)
                return -ENOMEM;

        page_buf = alloc_page(GFP_KERNEL);
        if (!page_buf) {
                kmem_cache_free(blkdev->request_pool, blkvsc_req);
                return -ENOMEM;
        }

        vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
        init_completion(&blkvsc_req->request.wait_event);
        blkvsc_req->dev = blkdev;
        blkvsc_req->req = NULL;
        blkvsc_req->write = 0;

        blkvsc_req->request.data_buffer.pfn_array[0] =
                page_to_pfn(page_buf);
        blkvsc_req->request.data_buffer.offset = 0;

        switch (op) {
        case DO_INQUIRY:
                blkvsc_req->cmnd[0] = INQUIRY;
                blkvsc_req->cmnd[1] = 0x1;	/* EVPD: get vital product data */
                blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device id */
                blkvsc_req->cmnd[4] = 64;
                blkvsc_req->cmd_len = 6;
                blkvsc_req->request.data_buffer.len = 64;
                break;

        case DO_CAPACITY:
                blkdev->sector_size = 0;
                blkdev->capacity = 0;

                blkvsc_req->cmnd[0] = READ_CAPACITY;
                blkvsc_req->cmd_len = 16;
                blkvsc_req->request.data_buffer.len = 8;
                break;

        case DO_FLUSH:
                blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
                blkvsc_req->cmd_len = 10;
                blkvsc_req->request.data_buffer.pfn_array[0] = 0;
                blkvsc_req->request.data_buffer.len = 0;
                break;

        default:
                ret = -EINVAL;
                goto cleanup;
        }

        blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

        wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

        /* check error */
        if (vm_srb->scsi_status) {
                scsi_normalize_sense(blkvsc_req->sense_buffer,
                                     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
                goto cleanup;
        }

        buf = kmap(page_buf);

        switch (op) {
        case DO_INQUIRY:
                device_type = buf[0] & 0x1F;

                if (device_type == 0x0)
                        blkdev->device_type = HARDDISK_TYPE;
                else
                        blkdev->device_type = UNKNOWN_DEV_TYPE;

                blkdev->device_id_len = buf[7];
                if (blkdev->device_id_len > 64)
                        blkdev->device_id_len = 64;

                memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
                break;

        case DO_CAPACITY:
                /* be to le */
                blkdev->capacity =
                        ((buf[0] << 24) | (buf[1] << 16) |
                         (buf[2] << 8) | buf[3]) + 1;

                blkdev->sector_size =
                        (buf[4] << 24) | (buf[5] << 16) |
                        (buf[6] << 8) | buf[7];
                break;

        default:
                break;
        }

        kunmap(page_buf);

cleanup:
        __free_page(page_buf);

        kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

        return ret;
}
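/*
 * The 8-byte READ CAPACITY(10) payload decoded above follows the
 * standard SCSI format:
 *
 *	bytes 0-3  last addressable LBA, big-endian (hence the "+ 1"
 *	           to convert it into a block count)
 *	bytes 4-7  block size in bytes, big-endian
 *
 * For example, 0x003FFFFF and 0x00000200 decode to 0x400000 blocks
 * of 512 bytes, i.e. a 2 GiB disk.
 */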
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
        struct blkvsc_request *pend_req, *tmp;
        struct blkvsc_request *comp_req, *tmp2;
        struct vmscsi_request *vm_srb;
        int ret = 0;

        /* Flush the pending list first */
        list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
                                 pend_entry) {
                /*
                 * The pend_req could be part of a partially completed
                 * request. If so, complete those requests first until we
                 * hit the pend_req.
                 */
                list_for_each_entry_safe(comp_req, tmp2,
                                         &pend_req->group->blkvsc_req_list,
                                         req_entry) {
                        if (comp_req == pend_req)
                                break;

                        list_del(&comp_req->req_entry);

                        if (comp_req->req) {
                                vm_srb =
                                    &comp_req->request.vstor_packet.vm_srb;
                                ret = __blk_end_request(comp_req->req,
                                        (!vm_srb->scsi_status ? 0 : -EIO),
                                        comp_req->sector_count *
                                        blkdev->sector_size);

                                /* FIXME: shouldn't this do more than return? */
                                if (ret)
                                        goto out;
                        }

                        kmem_cache_free(blkdev->request_pool, comp_req);
                }

                list_del(&pend_req->pend_entry);

                list_del(&pend_req->req_entry);

                if (pend_req->req) {
                        if (!__blk_end_request(pend_req->req, -EIO,
                                               pend_req->sector_count *
                                               blkdev->sector_size)) {
                                /*
                                 * All the sectors have been transferred,
                                 * i.e. the request is done.
                                 */
                                kmem_cache_free(blkdev->request_pool,
                                                pend_req->group);
                        }
                }

                kmem_cache_free(blkdev->request_pool, pend_req);
        }

out:
        return ret;
}
/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct storvsc_driver_object *storvsc_drv_obj =
                drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct block_device_context *blkdev = dev_get_drvdata(device);
        unsigned long flags;

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        storvsc_drv_obj->base.dev_rm(device_obj);

        /* Get to a known state */
        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->shutting_down = 1;

        blk_stop_queue(blkdev->gd->queue);

        spin_unlock_irqrestore(&blkdev->lock, flags);

        while (blkdev->num_outstanding_reqs) {
                DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
                            blkdev->num_outstanding_reqs);
                udelay(100);
        }

        blkvsc_do_operation(blkdev, DO_FLUSH);

        spin_lock_irqsave(&blkdev->lock, flags);

        blkvsc_cancel_pending_reqs(blkdev);

        spin_unlock_irqrestore(&blkdev->lock, flags);

        blk_cleanup_queue(blkdev->gd->queue);

        del_gendisk(blkdev->gd);

        kmem_cache_destroy(blkdev->request_pool);

        kfree(blkdev);

        return 0;
}
static void blkvsc_shutdown(struct device *device)
{
        struct block_device_context *blkdev = dev_get_drvdata(device);
        unsigned long flags;

        if (!blkdev)
                return;

        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->shutting_down = 1;

        blk_stop_queue(blkdev->gd->queue);

        spin_unlock_irqrestore(&blkdev->lock, flags);

        while (blkdev->num_outstanding_reqs) {
                DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
                            blkdev->num_outstanding_reqs);
                udelay(100);
        }

        blkvsc_do_operation(blkdev, DO_FLUSH);

        spin_lock_irqsave(&blkdev->lock, flags);

        blkvsc_cancel_pending_reqs(blkdev);

        spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
        struct block_device_context *blkdev = disk->private_data;

        mutex_lock(&blkvsc_mutex);
        spin_lock(&blkdev->lock);
        if (blkdev->users == 1) {
                spin_unlock(&blkdev->lock);
                blkvsc_do_operation(blkdev, DO_FLUSH);
                spin_lock(&blkdev->lock);
        }

        blkdev->users--;

        spin_unlock(&blkdev->lock);
        mutex_unlock(&blkvsc_mutex);
        return 0;
}
/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. blkvsc_request() will work on the pending_list.
 */
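/*
 * Illustration (hypothetical request): if a request's bio segments
 * span more pages than MAX_MULTIPAGE_BUFFER_COUNT, or a segment
 * leaves a hole at the start or end of a page, the loop below starts
 * a fresh blkvsc_request. Every piece points at the same
 * blkvsc_request_group, so the struct request is only completed once
 * group->outstanding drops to zero.
 */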
static int blkvsc_do_request(struct block_device_context *blkdev,
                             struct request *req)
{
        struct bio *bio = NULL;
        struct bio_vec *bvec = NULL;
        struct bio_vec *prev_bvec = NULL;
        struct blkvsc_request *blkvsc_req = NULL;
        struct blkvsc_request *tmp;
        int databuf_idx = 0;
        int seg_idx = 0;
        sector_t start_sector;
        unsigned long num_sectors = 0;
        int ret = 0;
        int pending = 0;
        struct blkvsc_request_group *group = NULL;

        /* Create a group to tie req to list of blkvsc_reqs */
        group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
        if (!group)
                return -ENOMEM;

        INIT_LIST_HEAD(&group->blkvsc_req_list);
        group->outstanding = group->status = 0;

        start_sector = blk_rq_pos(req);

        /* foreach bio in the request */
        for (bio = req->bio; bio; bio = bio->bi_next) {
                /*
                 * Map this bio into an existing or new storvsc request
                 */
                bio_for_each_segment(bvec, bio, seg_idx) {
                        /* Get a new storvsc request */
                        if ((!blkvsc_req) ||
                            (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
                            /* hole at the begin of page */
                            || (bvec->bv_offset != 0) ||
                            /* hole at the end of page */
                            (prev_bvec &&
                             (prev_bvec->bv_len != PAGE_SIZE))) {
                                /* submit the prev one */
                                if (blkvsc_req) {
                                        blkvsc_req->sector_start =
                                                start_sector;
                                        sector_div(
                                                blkvsc_req->sector_start,
                                                (blkdev->sector_size >> 9));

                                        blkvsc_req->sector_count =
                                                num_sectors /
                                                (blkdev->sector_size >> 9);
                                        blkvsc_init_rw(blkvsc_req);
                                }

                                /*
                                 * Create new blkvsc_req to represent
                                 * the current bvec
                                 */
                                blkvsc_req = kmem_cache_zalloc(
                                        blkdev->request_pool, GFP_ATOMIC);
                                if (!blkvsc_req) {
                                        /* free up everything */
                                        list_for_each_entry_safe(
                                                blkvsc_req, tmp,
                                                &group->blkvsc_req_list,
                                                req_entry) {
                                                list_del(
                                                    &blkvsc_req->req_entry);
                                                kmem_cache_free(
                                                    blkdev->request_pool,
                                                    blkvsc_req);
                                        }

                                        kmem_cache_free(
                                            blkdev->request_pool, group);
                                        return -ENOMEM;
                                }

                                memset(blkvsc_req, 0,
                                       sizeof(struct blkvsc_request));

                                blkvsc_req->dev = blkdev;
                                blkvsc_req->req = req;
                                blkvsc_req->request.data_buffer.offset =
                                        bvec->bv_offset;
                                blkvsc_req->request.data_buffer.len = 0;

                                /* Add to the group */
                                blkvsc_req->group = group;
                                blkvsc_req->group->outstanding++;
                                list_add_tail(&blkvsc_req->req_entry,
                                        &blkvsc_req->group->blkvsc_req_list);

                                start_sector += num_sectors;
                                num_sectors = 0;
                                databuf_idx = 0;
                        }

                        /*
                         * Add the curr bvec/segment to the curr
                         * blkvsc_req
                         */
                        blkvsc_req->request.data_buffer.
                                pfn_array[databuf_idx]
                                = page_to_pfn(bvec->bv_page);
                        blkvsc_req->request.data_buffer.len
                                += bvec->bv_len;

                        prev_bvec = bvec;

                        databuf_idx++;
                        num_sectors += bvec->bv_len >> 9;

                } /* bio_for_each_segment */

        } /* rq_for_each_bio */

        /* Handle the last one */
        if (blkvsc_req) {
                blkvsc_req->sector_start = start_sector;
                sector_div(blkvsc_req->sector_start,
                           (blkdev->sector_size >> 9));

                blkvsc_req->sector_count = num_sectors /
                                           (blkdev->sector_size >> 9);

                blkvsc_init_rw(blkvsc_req);
        }

        list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
                if (pending) {
                        list_add_tail(&blkvsc_req->pend_entry,
                                      &blkdev->pending_list);
                } else {
                        ret = blkvsc_submit_request(blkvsc_req,
                                                    blkvsc_request_completion);
                        if (ret == -1) {
                                pending = 1;
                                list_add_tail(&blkvsc_req->pend_entry,
                                              &blkdev->pending_list);
                        }
                }
        }

        return pending;
}
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
        struct blkvsc_request *pend_req, *tmp;
        int ret = 0;

        /* Flush the pending list first */
        list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
                                 pend_entry) {
                ret = blkvsc_submit_request(pend_req,
                                            blkvsc_request_completion);
                if (ret != 0)
                        break;
                else
                        list_del(&pend_req->pend_entry);
        }

        return ret;
}
static void blkvsc_request(struct request_queue *queue)
{
        struct block_device_context *blkdev = NULL;
        struct request *req;
        int ret = 0;

        while ((req = blk_peek_request(queue)) != NULL) {

                blkdev = req->rq_disk->private_data;
                if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
                        __blk_end_request_cur(req, 0);
                        continue;
                }

                ret = blkvsc_do_pending_reqs(blkdev);

                if (ret != 0) {
                        blk_stop_queue(queue);
                        break;
                }

                blk_start_request(req);

                ret = blkvsc_do_request(blkdev, req);
                if (ret > 0) {
                        blk_stop_queue(queue);
                        break;
                } else if (ret < 0) {
                        blk_requeue_request(queue, req);
                        blk_stop_queue(queue);
                        break;
                }
        }
}
/* The one and only one */
static struct storvsc_driver_object g_blkvsc_drv;

static const struct block_device_operations block_ops = {
        .owner = THIS_MODULE,
        .open = blkvsc_open,
        .release = blkvsc_release,
        .getgeo = blkvsc_getgeo,
        .ioctl = blkvsc_ioctl,
};
/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
        struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
        struct hv_driver *drv = &g_blkvsc_drv.base;
        int ret;

        storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size;

        drv->priv = storvsc_drv_obj;

        /* Callback to client driver to complete the initialization */
        blk_vsc_initialize(&storvsc_drv_obj->base);

        drv->driver.name = storvsc_drv_obj->base.name;

        drv->driver.probe = blkvsc_probe;
        drv->driver.remove = blkvsc_remove;
        drv->driver.shutdown = blkvsc_shutdown;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
        struct device **curr = (struct device **)data;

        *curr = dev;
        return 1; /* stop iterating */
}
static void blkvsc_drv_exit(void)
{
        struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
        struct hv_driver *drv = &g_blkvsc_drv.base;
        struct device *current_dev;
        int ret;

        while (1) {
                current_dev = NULL;

                /* Get the device */
                ret = driver_for_each_device(&drv->driver, NULL,
                                             (void *) &current_dev,
                                             blkvsc_drv_exit_cb);
                if (ret)
                        DPRINT_WARN(BLKVSC_DRV,
                                    "driver_for_each_device returned %d", ret);

                if (current_dev == NULL)
                        break;

                /* Initiate removal from the top-down */
                device_unregister(current_dev);
        }

        if (storvsc_drv_obj->base.cleanup)
                storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base);

        vmbus_child_driver_unregister(&drv->driver);
}
/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct storvsc_driver_object *storvsc_drv_obj =
                drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct block_device_context *blkdev = NULL;
        struct storvsc_device_info device_info;
        int major = 0;
        int devnum = 0;
        int ret = 0;
        static int ide0_registered;
        static int ide1_registered;

        if (!storvsc_drv_obj->base.dev_add) {
                DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
                ret = -1;
                goto cleanup;
        }

        blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
        if (!blkdev) {
                ret = -ENOMEM;
                goto cleanup;
        }

        INIT_LIST_HEAD(&blkdev->pending_list);

        /* Initialize what we can here */
        spin_lock_init(&blkdev->lock);

        blkdev->request_pool = kmem_cache_create(dev_name(&device_obj->device),
                                        sizeof(struct blkvsc_request), 0,
                                        SLAB_HWCACHE_ALIGN, NULL);
        if (!blkdev->request_pool) {
                ret = -ENOMEM;
                goto cleanup;
        }

        /* Call to the vsc driver to add the device */
        ret = storvsc_drv_obj->base.dev_add(device_obj, &device_info);
        if (ret != 0) {
                DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
                goto cleanup;
        }

        blkdev->device_ctx = device_obj;
        /* this identifies the device 0 or 1 */
        blkdev->target = device_info.target_id;
        /* this identifies the ide ctrl 0 or 1 */
        blkdev->path = device_info.path_id;

        dev_set_drvdata(device, blkdev);
        /* Calculate the major and device num */
        if (blkdev->path == 0) {
                major = IDE0_MAJOR;
                devnum = blkdev->path + blkdev->target;	/* 0 or 1 */

                if (!ide0_registered) {
                        ret = register_blkdev(major, "ide");
                        if (ret != 0) {
                                DPRINT_ERR(BLKVSC_DRV,
                                           "register_blkdev() failed! ret %d",
                                           ret);
                                goto remove;
                        }

                        ide0_registered = 1;
                }
        } else if (blkdev->path == 1) {
                major = IDE1_MAJOR;
                devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */

                if (!ide1_registered) {
                        ret = register_blkdev(major, "ide");
                        if (ret != 0) {
                                DPRINT_ERR(BLKVSC_DRV,
                                           "register_blkdev() failed! ret %d",
                                           ret);
                                goto remove;
                        }

                        ide1_registered = 1;
                }
        } else {
                DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
                ret = -1;
                goto remove;
        }

        DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);
        blkdev->gd = alloc_disk(BLKVSC_MINORS);
        if (!blkdev->gd) {
                DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
                ret = -1;
                goto cleanup;
        }

        blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

        blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
        blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
        blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
        blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
        blk_queue_dma_alignment(blkdev->gd->queue, 511);

        blkdev->gd->major = major;
        if (devnum == 1 || devnum == 3)
                blkdev->gd->first_minor = BLKVSC_MINORS;
        else
                blkdev->gd->first_minor = 0;
        blkdev->gd->fops = &block_ops;
        blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
        blkdev->gd->private_data = blkdev;
        blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
        sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
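        /*
         * Net effect of the numbering above (mirrors legacy IDE naming):
         * IDE0/master -> hda (first_minor 0), IDE0/slave -> hdb
         * (first_minor 64 = BLKVSC_MINORS), IDE1/master -> hdc,
         * IDE1/slave -> hdd.
         */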
        blkvsc_do_operation(blkdev, DO_INQUIRY);
        blkvsc_do_operation(blkdev, DO_CAPACITY);

        set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
        blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

        add_disk(blkdev->gd);

        DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
                    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
                    blkdev->sector_size);

        return ret;

remove:
        storvsc_drv_obj->base.dev_rm(device_obj);

cleanup:
        if (blkdev) {
                if (blkdev->request_pool) {
                        kmem_cache_destroy(blkdev->request_pool);
                        blkdev->request_pool = NULL;
                }
                kfree(blkdev);
                blkdev = NULL;
        }

        return ret;
}
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
        struct blkvsc_request *blkvsc_req =
                (struct blkvsc_request *)request->context;
        struct block_device_context *blkdev =
                (struct block_device_context *)blkvsc_req->dev;
        unsigned long flags;
        struct blkvsc_request *comp_req, *tmp;
        struct vmscsi_request *vm_srb;

        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->num_outstanding_reqs--;
        blkvsc_req->group->outstanding--;

        /*
         * Only start processing when all the blkvsc_reqs are
         * completed. This guarantees no out-of-order blkvsc_req
         * completion when calling end_that_request_first()
         */
        if (blkvsc_req->group->outstanding == 0) {
                list_for_each_entry_safe(comp_req, tmp,
                                         &blkvsc_req->group->blkvsc_req_list,
                                         req_entry) {
                        list_del(&comp_req->req_entry);

                        vm_srb =
                            &comp_req->request.vstor_packet.vm_srb;
                        if (!__blk_end_request(comp_req->req,
                                (!vm_srb->scsi_status ? 0 : -EIO),
                                comp_req->sector_count * blkdev->sector_size)) {
                                /*
                                 * All the sectors have been transferred,
                                 * i.e. the request is done.
                                 */
                                kmem_cache_free(blkdev->request_pool,
                                                comp_req->group);
                        }

                        kmem_cache_free(blkdev->request_pool, comp_req);
                }

                if (!blkdev->shutting_down) {
                        blkvsc_do_pending_reqs(blkdev);
                        blk_start_queue(blkdev->gd->queue);
                        blkvsc_request(blkdev->gd->queue);
                }
        }

        spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int __init blkvsc_init(void)
{
        int ret;

        BUILD_BUG_ON(sizeof(sector_t) != 8);

        ret = blkvsc_drv_init();

        return ret;
}

static void __exit blkvsc_exit(void)
{
        blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);