/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include "osd.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"
#define BLKVSC_MINORS	64
enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};
/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be null if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be null */
	struct blkvsc_request_group *group;

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap,
	 * because the extension buffer falls right here and is pointed to by
	 * request.Extension.
	 * Which sounds like a horrible idea, who designed this?
	 */
};
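/*
 * Layout note on the warning above: every object allocated from
 * block_device_context.request_pool is sized
 * sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, and
 * blkvsc_submit_request() aims request.Extension at the first byte past
 * the struct:
 *
 *	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
 *					  sizeof(struct blkvsc_request));
 *
 * so any field added after 'request' would be overwritten by the storvsc
 * extension data.
 */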
/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct vm_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};
/* Per driver */
struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	/* FIXME this is a bug! */
	struct driver_context drv_ctx;
	struct storvsc_driver_object drv_obj;
};
/* Static decl */
static DEFINE_MUTEX(blkvsc_mutex);
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(struct hv_storvsc_request *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (in bytes)");
/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;
static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	int ret;

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(struct hv_guid));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	return ret;
}
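/*
 * Initialization flow, as wired above: blkvsc_init() passes
 * BlkVscInitialize as drv_init, which fills in storvsc_drv_obj->Base
 * (name, deviceType, and the OnDeviceAdd/OnDeviceRemove/OnCleanup and
 * OnIORequest callbacks); this function then copies the device GUID into
 * the driver_context and registers it as a vmbus child driver so that
 * vmbus can match devices to blkvsc_probe().
 */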
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1; /* stop iterating */
}
static void blkvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *) &current_dev,
					     blkvsc_drv_exit_cb);
		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);
}
/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct vm_device *device_ctx = device_to_vm_device(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	int major = 0;
	int devnum = 0;
	int ret = 0;
	static int ide0_registered;
	static int ide1_registered;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	/* ASSERT(sizeof(struct blkvsc_request_group) <= */
	/*	sizeof(struct blkvsc_request)); */

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
					sizeof(struct blkvsc_request) +
					storvsc_drv_obj->RequestExtSize, 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	/* this identifies the device 0 or 1 */
	blkdev->target = device_info.TargetId;
	/* this identifies the ide ctrl 0 or 1 */
	blkdev->path = device_info.PathId;

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;	/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed! ret %d", ret);
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	/* go! */
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	return ret;
}
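/*
 * Naming recap for the probe above: PathId selects the emulated IDE
 * controller (0 -> IDE0_MAJOR, 1 -> IDE1_MAJOR) and TargetId the device on
 * it, so devnum 0..3 maps to hda..hdd. The second device on each controller
 * starts at first_minor == BLKVSC_MINORS (64), matching the 64-minors-per-
 * disk convention of the legacy IDE majors.
 */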
static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
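/*
 * The CDB built above is SYNCHRONIZE_CACHE (0x35) with every other byte
 * left zero; per SBC semantics an LBA and block count of zero ask the
 * device to flush its entire cache. cond/wevent is the driver's ad-hoc
 * synchronous completion: blkvsc_cmd_completion() sets cond to 1 and wakes
 * the waiter.
 */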
/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;		/* Get product data */
	blkvsc_req->cmnd[2] = 0x83;		/* mode page 83 */
	blkvsc_req->cmnd[4] = 64;
	blkvsc_req->cmd_len = 6;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 * blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
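/*
 * Response bytes consumed above (64-byte buffer): buf[0] bits 4..0 are the
 * peripheral device type (0x00 direct-access disk, 0x05 CD/DVD). The CDB
 * sets EVPD (cmnd[1] = 0x1) with page code 0x83 (device identification),
 * so the payload is that VPD page rather than standard INQUIRY data: the
 * first designation descriptor starts at buf[4], which puts its identifier
 * length at buf[7] and the identifier itself at buf[8], matching the reads
 * above.
 */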
/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		goto cleanup;
	}
	buf = kmap(page_buf);

	/* be to le */
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
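/*
 * READ CAPACITY(10) returns 8 big-endian bytes: bytes 0-3 hold the last
 * LBA (hence the +1 to turn it into a sector count) and bytes 4-7 the
 * block size. Example: a response of 00 00 0F FF | 00 00 02 00 decodes to
 * a capacity of 0x1000 sectors of 512 bytes, i.e. a 2 MiB disk.
 */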
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		goto cleanup;
	}
	buf = kmap(page_buf);

	/* be to le */
	blkdev->capacity = be64_to_cpu(*(unsigned long long *) &buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

#if 0
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;

	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];
#endif

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
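/*
 * Opcode 0x9E is SERVICE ACTION IN(16); its READ CAPACITY(16) variant
 * widens the same layout as above: bytes 0-7 are the last LBA as a be64
 * and bytes 8-11 the block size as a be32, which is why only the first
 * 12 bytes of the page are requested here.
 */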
/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct vm_device *device_ctx = device_to_vm_device(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
		return -1;

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0) {
		/* TODO: */
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);
	}

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	return ret;
}
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	/* ASSERT(blkvsc_req->req); */
	/* ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); */

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
				cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
				cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
				cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
				cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] =
				cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}
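/*
 * CDB selection above follows the classic SCSI limits: READ/WRITE(6)
 * carries a 21-bit LBA (<= 0x1fffff) and an 8-bit count, READ/WRITE(10) a
 * 32-bit LBA and a 16-bit count, and READ/WRITE(16) everything beyond
 * that. Note that cmd_len stays 16 for all three variants, so even the
 * 6- and 10-byte commands are handed to the host as 16-byte CDBs padded
 * with trailing zeros.
 */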
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct vm_device *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx =
			driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
			(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
			&blkvsc_drv_ctx->drv_obj;
	struct hv_storvsc_request *storvsc_req;
	int i;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
		   "req %p type %s start_sector %lu count %ld offset %d "
		   "len %d\n", blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long) blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
			   "req %p pfn[%d] %llx\n",
			   blkvsc_req, i,
			   blkvsc_req->request.DataBuffer.PfnArray[i]);
	}

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
					  sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	 /* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
					   &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}
/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		   (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio) {
		for (bio = req->bio; bio; bio = bio->bi_next) {
			/*
			 * Map this bio into an existing or new storvsc request
			 */
			bio_for_each_segment(bvec, bio, seg_idx) {
				DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
					   "- req %p bio %p bvec %p seg_idx %d "
					   "databuf_idx %d\n", req, bio, bvec,
					   seg_idx, databuf_idx);

				/* Get a new storvsc request */
				/* 1st-time */
				if ((!blkvsc_req) ||
				    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
				    /* hole at the begin of page */
				    || (bvec->bv_offset != 0) ||
				    /* hole at the end of page */
				    (prev_bvec &&
				     (prev_bvec->bv_len != PAGE_SIZE))) {
					/* submit the prev one */
					if (blkvsc_req) {
						blkvsc_req->sector_start =
							start_sector;
						sector_div(
						    blkvsc_req->sector_start,
						    (blkdev->sector_size >> 9));

						blkvsc_req->sector_count =
						    num_sectors /
						    (blkdev->sector_size >> 9);
						blkvsc_init_rw(blkvsc_req);
					}

					/*
					 * Create new blkvsc_req to represent
					 * the current bvec
					 */
					blkvsc_req = kmem_cache_alloc(
							blkdev->request_pool,
							GFP_ATOMIC);
					if (!blkvsc_req) {
						/* free up everything */
						list_for_each_entry_safe(
						    blkvsc_req, tmp,
						    &group->blkvsc_req_list,
						    req_entry) {
							list_del(
							  &blkvsc_req->req_entry);
							kmem_cache_free(
							  blkdev->request_pool,
							  blkvsc_req);
						}

						kmem_cache_free(
							blkdev->request_pool,
							group);
						return -ENOMEM;
					}

					memset(blkvsc_req, 0,
					       sizeof(struct blkvsc_request));

					blkvsc_req->dev = blkdev;
					blkvsc_req->req = req;
					blkvsc_req->request.DataBuffer.Offset
						= bvec->bv_offset;
					blkvsc_req->request.DataBuffer.Length
						= 0;

					/* Add to the group */
					blkvsc_req->group = group;
					blkvsc_req->group->outstanding++;
					list_add_tail(&blkvsc_req->req_entry,
					    &blkvsc_req->group->blkvsc_req_list);

					start_sector += num_sectors;
					num_sectors = 0;
					databuf_idx = 0;
				}

				/* Add the curr bvec/segment to the curr blkvsc_req */
				blkvsc_req->request.DataBuffer.
					PfnArray[databuf_idx]
						= page_to_pfn(bvec->bv_page);
				blkvsc_req->request.DataBuffer.Length
					+= bvec->bv_len;

				prev_bvec = bvec;

				databuf_idx++;
				num_sectors += bvec->bv_len >> 9;

			} /* bio_for_each_segment */

		} /* rq_for_each_bio */
	}

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	return pending;
}
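/*
 * Splitting rules used above: a fresh blkvsc_request is started on the
 * first segment, when the PFN array is full (MAX_MULTIPAGE_BUFFER_COUNT
 * pages), when a bvec does not start at a page boundary, or when the
 * previous bvec did not fill its page. This keeps every sub-request
 * describable to the host as one offset plus a run of whole-page PFNs.
 * The return value is non-zero when some sub-requests had to be parked on
 * pending_list, which blkvsc_request() treats as "stop the queue".
 */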
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	/* ASSERT(blkvsc_req->group); */

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;

	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long) comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				ret = __blk_end_request(comp_req->req,
					(!comp_req->request.Status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);

				/* FIXME: shouldn't this do more than return? */
				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV,
					   "blkvsc_cancel_pending_reqs() - "
					   "req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

out:
	return ret;
}
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}
static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}
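/*
 * Return-code convention from blkvsc_do_request(), as handled above:
 * > 0 means some sub-requests were queued on pending_list (no ring space),
 * so the queue is stopped until completions restart it; < 0 means
 * allocation failed, so the request is requeued and the queue stopped.
 * blkvsc_request_completion() later calls blk_start_queue() and re-enters
 * this function once a whole group drains.
 */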
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}
static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;
}
static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity *
			    (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}
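/*
 * Worked example of the heuristic above (a sketch; values assume a 4 GiB
 * disk, i.e. total_sectors = 8388608): the disk is below 65535*16*63
 * sectors, so the 17-sector geometry is tried first, giving 482 heads;
 * that exceeds 16, so 31 sectors/track is tried next
 * (cylinder_times_heads = 270600, still >= 16*1024), and finally
 * 63 sectors/track yields cylinder_times_heads = 133152 and
 * CHS = (8322, 16, 63).
 */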
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
/*	struct block_device_context *blkdev = bd->bd_disk->private_data; */
	int ret;

	switch (cmd) {
	/*
	 * TODO: I think there is certain format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
#if 0
	case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
		if (copy_to_user((void __user *)argument, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
#endif
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int __init blkvsc_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	return ret;
}
static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);