/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"
#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};
/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
struct blkvsc_request {
	struct list_head req_entry;	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head pend_entry;	/* block_device_context.pending_list */
	struct request *req;		/* may be NULL if we generate a request internally */
	struct block_device_context *dev;
	struct blkvsc_request_group *group;	/* the group this request is part of; may be NULL */

	wait_queue_head_t wevent;
	int cond;
	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap,
	 * because the extension buffer falls right here and is pointed to by
	 * request.Extension.
	 */
};
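/*
 * Each slab object in request_pool is allocated as
 * sizeof(struct blkvsc_request) + RequestExtSize, so the storvsc
 * extension buffer sits immediately past this struct;
 * blkvsc_submit_request() points request.Extension at it.
 */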
/* Per device structure */
struct block_device_context {
	struct device_context *device_ctx;	/* point back to our device context */
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};
/* Per driver */
struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};
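/*
 * drv_ctx must stay the first field: probe/remove/submit cast the
 * struct driver_context returned by driver_to_driver_context()
 * directly to struct blkvsc_driver_context, which is only valid
 * while drv_ctx sits at offset 0.
 */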
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(struct hv_storvsc_request *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;
static struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
/*
 * Name: blkvsc_drv_init()
 *
 * Desc: BlkVsc driver initialization.
 */
static int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	pfn_drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(GUID));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1; /* stop iterating */
}
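/*
 * Returning 1 stops driver_for_each_device() at the first device it
 * visits, so each pass of the teardown loop in blkvsc_drv_exit()
 * captures one remaining child device to unregister.
 */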
/*
 * Name: blkvsc_drv_exit()
 *
 * Desc: BlkVsc driver exit.
 */
static void blkvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	struct device *current_dev = NULL;
	int ret;

	DPRINT_ENTER(BLKVSC_DRV);

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);
		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);
}
/*
 * Name: blkvsc_probe()
 *
 * Desc: Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	STORVSC_DEVICE_INFO device_info;
	int major = 0;
	int devnum = 0;
	int ret = 0;

	static int ide0_registered;
	static int ide1_registered;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	ASSERT(sizeof(struct blkvsc_request_group) <=
	       sizeof(struct blkvsc_request));

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
				sizeof(struct blkvsc_request) +
				storvsc_drv_obj->RequestExtSize, 0,
				SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	blkdev->target = device_info.TargetId; /* this identifies the device 0 or 1 */
	blkdev->path = device_info.PathId; /* this identifies the ide ctrl 0 or 1 */

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target; /* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_phys_segments(blkdev->gd->queue,
				    MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_max_hw_segments(blkdev->gd->queue,
				  MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE - 1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size / 512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
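/*
 * Summary of the naming scheme above: PathId selects the emulated IDE
 * controller (IDE0_MAJOR or IDE1_MAJOR) and devnum 0-3 selects
 * hda..hdd, with each disk owning BLKVSC_MINORS minor numbers for its
 * partitions.
 */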
static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
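/*
 * Caveat: wait_event_interruptible() can return early if a signal is
 * delivered, in which case blkvsc_req is freed while the
 * SYNCHRONIZE_CACHE command may still be in flight; an
 * uninterruptible wait would arguably be safer here.
 */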
/* Do a scsi INQUIRY cmd here to get the device type (i.e. disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* EVPD: request vital product data */
	blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device identification */
	blkvsc_req->cmnd[4] = 64;	/* allocation length */
	blkvsc_req->cmd_len = 6;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	device_type = buf[0] & 0x1F;

	if (device_type == 0x0)
		blkdev->device_type = HARDDISK_TYPE;
	else if (device_type == 0x5)
		blkdev->device_type = DVD_TYPE;
	else
		/* TODO: this is currently unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 * blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
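/*
 * The peripheral device type in INQUIRY byte 0 follows SPC: 0x0 is a
 * direct-access block device, 0x5 is CD/DVD; anything else is treated
 * as UNKNOWN_DEV_TYPE above.
 */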
/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) /* Medium not present */
			blkdev->media_not_present = 1;
	} else {
		buf = kmap(page_buf);

		blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
				    (buf[2] << 8) | buf[3]) + 1;
		blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
				      (buf[6] << 8) | buf[7];

		kunmap(page_buf);
	}

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
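/*
 * READ CAPACITY(10) returns the address of the *last* logical block,
 * so the total block count is that value + 1; both returned fields
 * are big-endian, hence the byte-by-byte assembly above.
 */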
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16 */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) /* Medium not present */
			blkdev->media_not_present = 1;
	} else {
		buf = kmap(page_buf);

		blkdev->capacity =
			be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
		blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

		kunmap(page_buf);
	}

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
/*
 * Name: blkvsc_remove()
 *
 * Desc: Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	int ret;

	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove) {
		DPRINT_EXIT(BLKVSC_DRV);
		return -1;
	}

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT * 8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
			cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
			cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
			cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
			cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] =
			cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}
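/*
 * CDB selection follows the SCSI addressing limits: READ/WRITE(16)
 * for LBAs beyond 32 bits, READ/WRITE(10) when the count needs more
 * than 8 bits or the LBA more than 21 bits, and READ/WRITE(6)
 * otherwise. The FUA bit (0x08 in byte 1) only exists in the 10- and
 * 16-byte variants, which is why the 6-byte branch does not set it.
 */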
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
	int ret;

	struct hv_storvsc_request *storvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s "
		   "start_sector %lu count %ld offset %d len %d\n",
		   blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	/*
	 * for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
	 *	DPRINT_DBG(BLKVSC_DRV,
	 *		   "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
	 *		   blkvsc_req, i,
	 *		   blkvsc_req->request.DataBuffer.PfnArray[i]);
	 * }
	 */

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
					  sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	 /* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
					   &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}
/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;

	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;

	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		   (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = (struct blkvsc_request_group *)
		kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio) {
		for (bio = req->bio; bio; bio = bio->bi_next) {
			/*
			 * Map this bio into an existing or new storvsc
			 * request
			 */
			bio_for_each_segment(bvec, bio, seg_idx) {
				DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment()"
					   " - req %p bio %p bvec %p seg_idx %d"
					   " databuf_idx %d\n", req, bio, bvec,
					   seg_idx, databuf_idx);

				/* Get a new storvsc request */
				/* 1st-time */
				if ((!blkvsc_req) ||
				    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
				    /* hole at the begin of page */
				    || (bvec->bv_offset != 0) ||
				    /* hole at the end of page */
				    (prev_bvec &&
				     (prev_bvec->bv_len != PAGE_SIZE))) {
					/* submit the prev one */
					if (blkvsc_req) {
						blkvsc_req->sector_start =
							start_sector;
						sector_div(
						    blkvsc_req->sector_start,
						    (blkdev->sector_size >> 9));

						blkvsc_req->sector_count =
						    num_sectors /
						    (blkdev->sector_size >> 9);

						blkvsc_init_rw(blkvsc_req);
					}

					/*
					 * Create new blkvsc_req to represent
					 * the current bvec
					 */
					blkvsc_req = kmem_cache_alloc(
						blkdev->request_pool,
						GFP_ATOMIC);
					if (!blkvsc_req) {
						/* free up everything */
						list_for_each_entry_safe(
						    blkvsc_req, tmp,
						    &group->blkvsc_req_list,
						    req_entry) {
							list_del(&blkvsc_req->req_entry);
							kmem_cache_free(blkdev->request_pool, blkvsc_req);
						}

						kmem_cache_free(blkdev->request_pool, group);
						return -ENOMEM;
					}

					memset(blkvsc_req, 0,
					       sizeof(struct blkvsc_request));

					blkvsc_req->dev = blkdev;
					blkvsc_req->req = req;
					blkvsc_req->request.DataBuffer.Offset =
						bvec->bv_offset;
					blkvsc_req->request.DataBuffer.Length =
						0;

					/* Add to the group */
					blkvsc_req->group = group;
					blkvsc_req->group->outstanding++;
					list_add_tail(&blkvsc_req->req_entry,
					    &blkvsc_req->group->blkvsc_req_list);

					start_sector += num_sectors;
					num_sectors = 0;
					databuf_idx = 0;
				}

				/*
				 * Add the curr bvec/segment to the curr
				 * blkvsc_req
				 */
				blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] =
					page_to_pfn(bvec->bv_page);
				blkvsc_req->request.DataBuffer.Length +=
					bvec->bv_len;

				prev_bvec = bvec;

				databuf_idx++;
				num_sectors += bvec->bv_len >> 9;

			} /* bio_for_each_segment */

		} /* rq_for_each_bio */
	}

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	return pending;
}
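/*
 * In short: consecutive bvecs are packed into a single blkvsc_request
 * until the PFN array fills up (MAX_MULTIPAGE_BUFFER_COUNT pages) or
 * a gap would appear (non-zero page offset, or a previous partial
 * page); the resulting blkvsc_requests are tied together by a
 * blkvsc_request_group so the struct request is only completed once
 * every piece has finished.
 */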
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
		(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
		(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
		(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
		(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				ret = __blk_end_request(comp_req->req,
					(!comp_req->request.Status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV,
					   "blkvsc_cancel_pending_reqs() - "
					   "req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

	return ret;
}
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}
static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}
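/*
 * blkvsc_request() is the request_fn registered via blk_init_queue()
 * in blkvsc_probe(), so the block layer invokes it with blkdev->lock
 * held; that is why nothing is locked explicitly above, while the
 * completion path takes blkdev->lock itself before re-entering here.
 */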
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	return 0;
}
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	return 0;
}
static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;
}
static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd,
			     blkdev->capacity * (blkdev->sector_size / 512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}
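/*
 * The geometry reported above is synthetic; a virtual disk has no
 * real CHS layout. Capacity is first capped at 65535*16*255 sectors,
 * then heads/sectors-per-track are stepped through the classic BIOS
 * ladder (17, 31, 63 sectors; up to 16 heads) until the cylinder
 * count fits the range partitioning tools expect.
 */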
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
	/* struct block_device_context *blkdev = bd->bd_disk->private_data; */
	int ret = 0;

	switch (cmd) {
	/*
	 * TODO: I think there is certain format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
	/*
	case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");

		if (copy_to_user((void __user *)arg, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
	*/
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
MODULE_LICENSE("GPL");

static int __init blkvsc_init(void)
{
	int ret;

	ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	DPRINT_ENTER(BLKVSC_DRV);

	blkvsc_drv_exit();

	DPRINT_EXIT(BLKVSC_DRV);
}

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);

module_init(blkvsc_init);
module_exit(blkvsc_exit);