Staging: merge 2.6.39-rc3 into staging-next
drivers/staging/hv/blkvsc_drv.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"


#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by one or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
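
/*
 * Sketch of the fan-out (illustrative, not from the original source):
 *
 *	struct request --> blkvsc_request_group --> blkvsc_request
 *	                                        \-> blkvsc_request ...
 *
 * group->outstanding counts blkvsc_requests still in flight; the struct
 * request is only ended once that count reaches zero (see
 * blkvsc_request_completion()).
 */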

struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be null if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be null */
	struct blkvsc_request_group *group;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
};

/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

static const char *g_blk_driver_name = "blkvsc";

/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hv_guid g_blk_device_type = {
	.data = {
		0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
	}
};

static int blk_vsc_on_device_add(struct hv_device *device,
				 void *additional_info)
{
	struct storvsc_device_info *device_info;
	int ret = 0;

	device_info = (struct storvsc_device_info *)additional_info;

	ret = stor_vsc_on_device_add(device, additional_info);
	if (ret != 0)
		return ret;

	/*
	 * We need to use the device instance guid to set the path and target
	 * id. For IDE devices, the device instance id is formatted as
	 * <bus id> - <device id> - 8899 - 000000000000.
	 */
	device_info->path_id = device->dev_instance.data[3] << 24 |
			       device->dev_instance.data[2] << 16 |
			       device->dev_instance.data[1] << 8 |
			       device->dev_instance.data[0];

	device_info->target_id = device->dev_instance.data[5] << 8 |
				 device->dev_instance.data[4];

	return ret;
}

static int blk_vsc_initialize(struct hv_driver *driver)
{
	struct storvsc_driver_object *stor_driver;
	int ret = 0;

	stor_driver = hvdr_to_stordr(driver);

	/* Make sure we are at least 2 pages since 1 page is used for control */
	/* ASSERT(stor_driver->RingBufferSize >= (PAGE_SIZE << 1)); */

	driver->name = g_blk_driver_name;
	memcpy(&driver->dev_type, &g_blk_device_type, sizeof(struct hv_guid));

	/*
	 * Divide the ring buffer data size (which is 1 page less than the ring
	 * buffer size since that page is reserved for the ring buffer indices)
	 * by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	stor_driver->max_outstanding_req_per_channel =
		((stor_driver->ring_buffer_size - PAGE_SIZE) /
		 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		       sizeof(struct vstor_packet) + sizeof(u64),
		       sizeof(u64)));
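
	/*
	 * Illustrative arithmetic with assumed sizes (the constants are
	 * defined elsewhere): a 64 KB ring buffer minus the 4 KB index
	 * page leaves 61440 bytes; if the aligned per-request footprint
	 * were 2048 bytes, that would permit 61440 / 2048 = 30
	 * outstanding requests per channel.
	 */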

	DPRINT_INFO(BLKVSC, "max io outstd %u",
		    stor_driver->max_outstanding_req_per_channel);

	/* Setup the dispatch table */
	stor_driver->base.dev_add = blk_vsc_on_device_add;
	stor_driver->base.dev_rm = stor_vsc_on_device_remove;
	stor_driver->base.cleanup = stor_vsc_on_cleanup;
	stor_driver->on_io_request = stor_vsc_on_io_request;

	return ret;
}

/* Static decl */
static DEFINE_MUTEX(blkvsc_mutex);
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static unsigned int blkvsc_check_events(struct gendisk *gd,
					unsigned int clearing);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(struct hv_storvsc_request *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);

static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");
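
/*
 * Example load (the module name here is assumed, not taken from the
 * Makefile): "modprobe blkvsc blkvsc_ringbuffer_size=131072".
 * S_IRUGO makes the value readable via sysfs but not writable at runtime.
 */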

/* The one and only one */
static struct storvsc_driver_object g_blkvsc_drv;

static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.check_events = blkvsc_check_events,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};

/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
	struct hv_driver *drv = &g_blkvsc_drv.base;
	int ret;

	storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size;

	drv->priv = storvsc_drv_obj;

	/* Callback to client driver to complete the initialization */
	blk_vsc_initialize(&storvsc_drv_obj->base);

	drv->driver.name = storvsc_drv_obj->base.name;

	drv->driver.probe = blkvsc_probe;
	drv->driver.remove = blkvsc_remove;
	drv->driver.shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}

static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1; /* stop iterating */
}

static void blkvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
	struct hv_driver *drv = &g_blkvsc_drv.base;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);

		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->base.cleanup)
		storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base);

	vmbus_child_driver_unregister(&drv->driver);

	return;
}

/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct hv_driver *drv =
				drv_to_hv_drv(device->driver);
	struct storvsc_driver_object *storvsc_drv_obj =
				drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);

	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	int major = 0;
	int devnum = 0;
	int ret = 0;
	static int ide0_registered;
	static int ide1_registered;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->base.dev_add) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	/* ASSERT(sizeof(struct blkvsc_request_group) <= */
	/*	sizeof(struct blkvsc_request)); */

	blkdev->request_pool = kmem_cache_create(dev_name(&device_obj->device),
					sizeof(struct blkvsc_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->base.dev_add(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_obj;
	/* this identifies the device 0 or 1 */
	blkdev->target = device_info.target_id;
	/* this identifies the ide ctrl 0 or 1 */
	blkdev->path = device_info.path_id;

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	/* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
	blkdev->gd->private_data = blkdev;
	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	/* go! */
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->base.dev_rm(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	return ret;
}

static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] = 0;
	blkvsc_req->request.data_buffer.offset = 0;
	blkvsc_req->request.data_buffer.len = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] =
		page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;
	blkvsc_req->request.data_buffer.len = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* enable vital product data (EVPD) */
	blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device identification */
	blkvsc_req->cmnd[4] = 64;	/* allocation length */
	blkvsc_req->cmd_len = 6;
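
	/*
	 * The response parsed below is the VPD page 0x83 payload: byte 0
	 * carries the peripheral device type in its low five bits, and
	 * the first identification descriptor starts at byte 4, so byte 7
	 * holds the identifier length and the identifier itself begins at
	 * byte 8.
	 */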

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete\n", blkvsc_req);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 *	blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] =
		page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;
	blkvsc_req->request.data_buffer.len = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 10;	/* READ CAPACITY(10) is a 10-byte CDB */

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete\n", blkvsc_req);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	/* check error */
	if (vm_srb->scsi_status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		return 0;
	}

	buf = kmap(page_buf);

	/* the response is big-endian; convert to host order */
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] =
		page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;
	blkvsc_req->request.data_buffer.len = 12;

	blkvsc_req->cmnd[0] = 0x9E;	/* READ CAPACITY(16) (SERVICE ACTION IN) */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete\n", blkvsc_req);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	/* check error */
	if (vm_srb->scsi_status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		return 0;
	}

	buf = kmap(page_buf);

	/* the response is big-endian; convert to host order */
	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	struct hv_driver *drv =
				drv_to_hv_drv(device->driver);
	struct storvsc_driver_object *storvsc_drv_obj =
				drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->base.dev_rm)
		return -1;

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->base.dev_rm(device_obj);
	if (ret != 0) {
		/* TODO: */
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);
	}

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	return ret;
}
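
/*
 * CDB selection below: READ/WRITE(6) can address only a 21-bit LBA and
 * an 8-bit transfer length, READ/WRITE(10) a 32-bit LBA and a 16-bit
 * length, and READ/WRITE(16) a full 64-bit LBA; the sector_start and
 * sector_count thresholds in blkvsc_init_rw() follow from those CDB
 * field widths.
 */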

static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	/* ASSERT(blkvsc_req->req); */
	/* ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); */

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
			cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
			cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
			cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
			cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] =
			cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}

static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct hv_device *device_ctx = blkdev->device_ctx;
	struct hv_driver *drv =
			drv_to_hv_drv(device_ctx->device.driver);
	struct storvsc_driver_object *storvsc_drv_obj =
			drv->priv;
	struct hv_storvsc_request *storvsc_req;
	struct vmscsi_request *vm_srb;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
		   "req %p type %s start_sector %lu count %ld offset %d "
		   "len %d\n", blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.data_buffer.offset,
		   blkvsc_req->request.data_buffer.len);

	storvsc_req = &blkvsc_req->request;
	vm_srb = &storvsc_req->vstor_packet.vm_srb;

	vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->on_io_completion = request_completion;
	storvsc_req->context = blkvsc_req;

	vm_srb->port_number = blkdev->port;
	vm_srb->path_id = blkdev->path;
	vm_srb->target_id = blkdev->target;
	vm_srb->lun = 0;	/* this is not really used at all */

	vm_srb->cdb_length = blkvsc_req->cmd_len;

	memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

	storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

	ret = storvsc_drv_obj->on_io_request(blkdev->device_ctx,
					     &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. blkvsc_request() will work on the pending_list.
 */
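
/*
 * Return value, as consumed by blkvsc_request() below: 0 when every
 * blkvsc_request was submitted, positive when some were parked on
 * pending_list (the caller then stops the queue), -ENOMEM when
 * allocation failed.
 */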
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		   (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio) {
		for (bio = req->bio; bio; bio = bio->bi_next) {
			/*
			 * Map this bio into an existing or new storvsc request
			 */
			bio_for_each_segment(bvec, bio, seg_idx) {
				DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
					   "- req %p bio %p bvec %p seg_idx %d "
					   "databuf_idx %d\n", req, bio, bvec,
					   seg_idx, databuf_idx);

				/* Get a new storvsc request */
				/* 1st-time */
				if ((!blkvsc_req) ||
				    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
				    /* hole at the beginning of page */
				    || (bvec->bv_offset != 0) ||
				    /* hole at the end of page */
				    (prev_bvec &&
				     (prev_bvec->bv_len != PAGE_SIZE))) {
					/* submit the prev one */
					if (blkvsc_req) {
						blkvsc_req->sector_start = start_sector;
						sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

						blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
						blkvsc_init_rw(blkvsc_req);
					}

					/*
					 * Create new blkvsc_req to represent
					 * the current bvec
					 */
					blkvsc_req =
						kmem_cache_zalloc(
							blkdev->request_pool,
							GFP_ATOMIC);
					if (!blkvsc_req) {
						/* free up everything */
						list_for_each_entry_safe(
							blkvsc_req, tmp,
							&group->blkvsc_req_list,
							req_entry) {
							list_del(&blkvsc_req->req_entry);
							kmem_cache_free(blkdev->request_pool, blkvsc_req);
						}

						kmem_cache_free(blkdev->request_pool, group);
						return -ENOMEM;
					}

					blkvsc_req->dev = blkdev;
					blkvsc_req->req = req;
					blkvsc_req->request.data_buffer.offset =
						bvec->bv_offset;
					blkvsc_req->request.data_buffer.len = 0;

					/* Add to the group */
					blkvsc_req->group = group;
					blkvsc_req->group->outstanding++;
					list_add_tail(&blkvsc_req->req_entry,
						&blkvsc_req->group->blkvsc_req_list);

					start_sector += num_sectors;
					num_sectors = 0;
					databuf_idx = 0;
				}

				/* Add the curr bvec/segment to the curr blkvsc_req */
				blkvsc_req->request.data_buffer.pfn_array[databuf_idx] =
					page_to_pfn(bvec->bv_page);
				blkvsc_req->request.data_buffer.len +=
					bvec->bv_len;

				prev_bvec = bvec;

				databuf_idx++;
				num_sectors += bvec->bv_len >> 9;

			} /* bio_for_each_segment */

		} /* rq_for_each_bio */
	}

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	return pending;
}

static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	blkdev->num_outstanding_reqs--;

	if (vm_srb->scsi_status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	complete(&blkvsc_req->request.wait_event);
}

static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;
	struct vmscsi_request *vm_srb;

	/* ASSERT(blkvsc_req->group); */

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.data_buffer.len,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			vm_srb =
				&comp_req->request.vstor_packet.vm_srb;
			if (!__blk_end_request(comp_req->req,
					       (!vm_srb->scsi_status ? 0 : -EIO),
					       comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	struct vmscsi_request *vm_srb;

	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				vm_srb =
					&comp_req->request.vstor_packet.vm_srb;
				ret = __blk_end_request(comp_req->req,
					(!vm_srb->scsi_status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);

				/* FIXME: shouldn't this do more than return? */
				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV,
					   "blkvsc_cancel_pending_reqs() - "
					   "req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

out:
	return ret;
}

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}

static unsigned int blkvsc_check_events(struct gendisk *gd,
					unsigned int clearing)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return DISK_EVENT_MEDIA_CHANGE;
}

static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity *
			     (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}

static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;
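
	/*
	 * Fake a CHS geometry the way legacy IDE drivers do. Worked
	 * example with assumed numbers: a 40 GiB disk has 83886080
	 * 512-byte sectors, which exceeds 65535*16*63, so the first
	 * branch picks 255 sectors/track and 16 heads; sector_div()
	 * then gives 83886080 / 255 = 328965, and 328965 / 16 = 20560
	 * cylinders.
	 */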

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}

static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
	/* struct block_device_context *blkdev = bd->bd_disk->private_data; */
	int ret;

	switch (cmd) {
	/*
	 * TODO: there is a defined format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
#if 0
	case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
		if (copy_to_user((void __user *)argument, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
#endif
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int __init blkvsc_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init();

	return ret;
}

static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);