/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *	Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"


#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

/*
 * This request ties the struct request and struct
 * blkvsc_request/STORVSC_REQUEST together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;

	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
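/*
 * A blkvsc_request_group is carved out of the same request_pool as the
 * blkvsc_requests it tracks (blkvsc_probe() asserts that it is no larger
 * than a struct blkvsc_request), and it is freed from the completion path
 * only once the whole struct request it represents has been ended.
 */
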
struct blkvsc_request {
	struct list_head req_entry;	/* blkvsc_request_group.blkvsc_req_list */

	struct list_head pend_entry;	/* block_device_context.pending_list */

	struct request *req;		/* This may be NULL if we generate a request internally */
	struct block_device_context *dev;
	struct blkvsc_request_group *group;	/* The group this request is part of. May be NULL */

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	STORVSC_REQUEST request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap,
	 * because the extension buffer falls right here and is pointed to by
	 * request.Extension.
	 */
};
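/*
 * The slab objects for this structure are allocated with room for the VSC's
 * per-request extension appended to them (the kmem_cache_create() call in
 * blkvsc_probe() adds storvsc_drv_obj->RequestExtSize to the object size),
 * and blkvsc_submit_request() points request.Extension just past the struct.
 */
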
/* Per device structure */
struct block_device_context {
	struct device_context *device_ctx;	/* point back to our device context */
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};

static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(STORVSC_REQUEST *request);
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
				 void (*request_completion)(STORVSC_REQUEST *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(STORVSC_REQUEST *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);


static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;

static struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};

/*++

Name:	blkvsc_drv_init()

Desc:	BlkVsc driver initialization.

--*/
int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret = 0;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	pfn_drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1;	/* stop iterating */
}

/*++

Name:	blkvsc_drv_exit()

Desc:	BlkVsc driver exit.

--*/
void blkvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	struct device *current_dev = NULL;

	DPRINT_ENTER(BLKVSC_DRV);

	while (1) {
		current_dev = NULL;

		driver_for_each_device(&drv_ctx->driver, NULL,
				       (void *)&current_dev, blkvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return;
}

/*++

Name:	blkvsc_probe()

Desc:	Add a new device for this driver

--*/
static int blkvsc_probe(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	STORVSC_DEVICE_INFO device_info;
	int major = 0;
	int devnum = 0;

	static int ide0_registered = 0;
	static int ide1_registered = 0;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
			sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	blkdev->target = device_info.TargetId;	/* this identifies the device 0 or 1 */
	blkdev->path = device_info.PathId;	/* this identifies the ide ctrl 0 or 1 */

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	/* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

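	/*
	 * devnum 0-3 selects one of (up to) two disks on each of the two
	 * emulated IDE controllers; below it determines the disk name
	 * ("hd" + 'a'..'d') and whether the disk starts at minor 0 or at
	 * BLKVSC_MINORS within the registered major.
	 */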
	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed! ret %d", ret);
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d", blkdev->gd->disk_name,
		    (unsigned long)blkdev->capacity, blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev && blkdev->request_pool) {
		kmem_cache_destroy(blkdev->request_pool);
		blkdev->request_pool = NULL;
	}

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
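
/*
 * The internal commands issued above and below (cache flush, INQUIRY and the
 * READ CAPACITY variants) all use the same synchronization pattern: cond is
 * cleared before submission, blkvsc_cmd_completion() sets cond and wakes
 * wevent, and the submitting thread sleeps in wait_event_interruptible()
 * until the host has answered.
 */
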
/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* Get product data */
	blkvsc_req->cmnd[2] = 0x83;	/* page code 0x83 */
	blkvsc_req->cmnd[4] = 64;
	blkvsc_req->cmd_len = 6;

	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 * blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
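
/*
 * The low 5 bits of byte 0 of an INQUIRY response carry the SCSI peripheral
 * device type, which is why 0x0 is treated as a direct-access disk and 0x5
 * as a CD/DVD device above.
 */
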
/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	/* Medium not present */
			blkdev->media_not_present = 1;
	}

	buf = kmap(page_buf);

	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
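
/*
 * READ CAPACITY(10) returns the last addressable LBA (big-endian) followed
 * by the block length in bytes, hence the "+ 1" when computing the capacity
 * above.
 */
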
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E;	/* READ_CAPACITY16; */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	/* Medium not present */
			blkdev->media_not_present = 1;
	}

	buf = kmap(page_buf);

	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	/* blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1; */
	/* blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
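
/*
 * 0x9E is SERVICE ACTION IN(16); with the READ CAPACITY(16) service action
 * it returns an 8-byte last-LBA field and a 4-byte block length, so it
 * remains usable for disks beyond the 32-bit LBA limit of READ CAPACITY(10).
 */
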
/*++

Name:	blkvsc_remove()

Desc:	Callback when our device is removed

--*/
static int blkvsc_remove(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove) {
		DPRINT_EXIT(BLKVSC_DRV);
		return -1;
	}

	/* Call to the vsc driver to let it know that the device is being removed */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
		DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}
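
/*
 * blkvsc_init_rw() picks the read/write CDB format from the transfer
 * parameters: READ/WRITE(16) when the start sector needs more than 32 bits,
 * READ/WRITE(10) when the sector count exceeds 255 or the start sector
 * exceeds 0x1fffff, and READ/WRITE(6) otherwise.
 */
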
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
				 void (*request_completion)(STORVSC_REQUEST *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
	int ret = 0;

	STORVSC_REQUEST *storvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %lu count %ld offset %d len %d\n",
		   blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	/*
	 * for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
	 *	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
	 *		   blkvsc_req, i, blkvsc_req->request.DataBuffer.PfnArray[i]);
	 */

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	/* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;

	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	int ret = 0;
	int pending = 0;

	sector_t start_sector;
	unsigned long num_sectors = 0;

	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req, (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = (struct blkvsc_request_group *)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/* Map this bio into an existing or new storvsc request */
		bio_for_each_segment(bvec, bio, seg_idx) {
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
				   req, bio, bvec, seg_idx, databuf_idx);

			/* Get a new storvsc request */
			if ((!blkvsc_req) ||					/* 1st-time */
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
			    (bvec->bv_offset != 0) ||				/* hole at the begin of page */
			    (prev_bvec && (prev_bvec->bv_len != PAGE_SIZE))) {	/* hole at the end of page */
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start = start_sector;
					sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

					blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

					blkvsc_init_rw(blkvsc_req);
				}

				/* Create new blkvsc_req to represent the current bvec */
				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry) {
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(blkdev->request_pool, blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0, sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/* Add the curr bvec/segment to the curr blkvsc_req */
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			prev_bvec = bvec;
			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld)\n",
				   blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count,
				   (unsigned long)start_sector, (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld) ret %d\n",
				   blkvsc_req, (unsigned long)blkvsc_req->sector_start, blkvsc_req->sector_count,
				   (unsigned long)start_sector, num_sectors, ret);
		}
	}

	return pending;
}
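
/*
 * A new blkvsc_request is started whenever the multi-page PFN array is full,
 * the current bvec does not begin at offset 0 of its page, or the previous
 * bvec did not fill its page: a single storvsc data buffer can only describe
 * one run of whole, contiguous pages.
 */
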
static void blkvsc_cmd_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;

	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}

static void blkvsc_request_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %lu sect_count %ld len %d group outstd %d total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
					       (!comp_req->request.Status ? 0 : -EIO),
					       comp_req->sector_count * blkdev->sector_size)) {
				/* All the sectors have been xferred ie the request is done */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
				kmem_cache_free(blkdev->request_pool, comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			ret = __blk_end_request(comp_req->req,
						(!comp_req->request.Status ? 0 : -EIO),
						comp_req->sector_count * blkdev->sector_size);

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (!__blk_end_request(pend_req->req,
				       -EIO,
				       pend_req->sector_count * blkdev->sector_size)) {
			/* All the sectors have been xferred ie the request is done */
			DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
			kmem_cache_free(blkdev->request_pool, pend_req->group);
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

	return ret;
}

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);

		ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
		if (ret != 0)
			break;

		list_del(&pend_req->pend_entry);
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;
}

static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}

int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */

		temp = cylinder_times_heads + 1023;
		rem = sector_div(temp, 1024);	/* sector_div stores the quotient in temp */

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
		}
	}

	temp = cylinder_times_heads;
	rem = sector_div(temp, heads);	/* sector_div stores the quotient in temp */
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);

	return 0;
}
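
/*
 * Hyper-V does not expose a real CHS geometry, so the values reported for
 * HDIO_GETGEO are synthesized: the capacity is capped at 65535 x 16 x 255
 * sectors and progressively larger sectors-per-track values are tried until
 * the resulting cylinder count fits classic BIOS-style limits.
 */
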
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	/*
	 * TODO: I think there is certain format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
	/*
	case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");

		if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
			ret = -EFAULT;
		break;
	*/
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

MODULE_LICENSE("GPL");

static int __init blkvsc_init(void)
{
	int ret;

	ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	DPRINT_ENTER(BLKVSC_DRV);

	blkvsc_drv_exit();

	DPRINT_EXIT(BLKVSC_DRV);
}

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);

module_init(blkvsc_init);
module_exit(blkvsc_exit);