staging: hv: Fixed lockup problem with bounce_buffer scatter list
drivers/staging/hv/storvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
#include "osd.h"
#include "logging.h"
#include "vmbus.h"
#include "StorVscApi.h"

struct host_device_context {
	/* must be 1st field
	 * FIXME this is a bug */
	struct work_struct host_rescan_work;

	/* point back to our device context */
	struct device_context *device_ctx;
	struct kmem_cache *request_pool;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_storvsc_request request;
	/* !!!DO NOT ADD ANYTHING BELOW HERE!!! */
	/* The extension buffer falls right here and is pointed to by
	 * request.Extension;
	 * Which sounds like a very bad design... */
};

struct storvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	/* FIXME this is a bug... */
	struct driver_context drv_ctx;
	struct storvsc_driver_object drv_obj;
};

/* Static decl */
static int storvsc_probe(struct device *dev);
static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
				void (*done)(struct scsi_cmnd *));
static int storvsc_device_alloc(struct scsi_device *);
static int storvsc_device_configure(struct scsi_device *);
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);
static void storvsc_host_rescan_callback(struct work_struct *work);
static void storvsc_host_rescan(struct hv_device *device_obj);
static int storvsc_remove(struct device *dev);

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len);
static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count);
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count);
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count);

static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[],
			       unsigned int *lun_count);
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info);

static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

/* The one and only one */
static struct storvsc_driver_context g_storvsc_drv;

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
	.module =		THIS_MODULE,
	.name =			"storvsc_host_t",
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler = storvsc_host_reset_handler,
	.slave_alloc =		storvsc_device_alloc,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		1,
	/* 64 max_queue * 1 target */
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	/*
	 * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs to
	 * merge into one sg element. If set, we must limit the
	 * max_segment_size to PAGE_SIZE, otherwise we may get one sg element
	 * that represents multiple physically contiguous pfns (ie
	 * sg[x].length > PAGE_SIZE).
	 */
	.use_clustering =	ENABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
};

/*
 * storvsc_drv_init - StorVsc driver initialization.
 */
static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
	int ret;
	struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;

	DPRINT_ENTER(STORVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
	storvsc_drv_obj->OnHostRescan = storvsc_host_rescan;

	/* Callback to client driver to complete the initialization */
	drv_init(&storvsc_drv_obj->Base);

	DPRINT_INFO(STORVSC_DRV,
		    "request extension size %u, max outstanding reqs %u",
		    storvsc_drv_obj->RequestExtSize,
		    storvsc_drv_obj->MaxOutstandingRequestsPerChannel);

	if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel <
	    STORVSC_MAX_IO_REQUESTS) {
		DPRINT_ERR(STORVSC_DRV,
			   "The number of outstanding io requests (%d) "
			   "is larger than that supported (%d) internally.",
			   STORVSC_MAX_IO_REQUESTS,
			   storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
		return -1;
	}

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(struct hv_guid));

	drv_ctx->probe = storvsc_probe;
	drv_ctx->remove = storvsc_remove;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

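/*
 * storvsc_drv_exit_cb - driver_for_each_device() callback used by
 * storvsc_drv_exit() below to pick off one child device at a time so it can
 * be unregistered before the driver itself is torn down.
 */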
static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1; /* stop iterating */
}

static void storvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
	struct device *current_dev = NULL;
	int ret;

	DPRINT_ENTER(STORVSC_DRV);

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *) &current_dev,
					     storvsc_drv_exit_cb);

		if (ret)
			DPRINT_WARN(STORVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return;
}

/*
 * storvsc_probe - Add a new device for this driver
 */
static int storvsc_probe(struct device *device)
{
	int ret;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx =
		(struct storvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
		&storvsc_drv_ctx->drv_obj;
	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct Scsi_Host *host;
	struct host_device_context *host_device_ctx;
	struct storvsc_device_info device_info;

	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceAdd)
		return -1;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct host_device_context));
	if (!host) {
		DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object");
		return -ENOMEM;
	}

	dev_set_drvdata(device, host);

	host_device_ctx = (struct host_device_context *)host->hostdata;
	memset(host_device_ctx, 0, sizeof(struct host_device_context));

	host_device_ctx->port = host->host_no;
	host_device_ctx->device_ctx = device_ctx;

	INIT_WORK(&host_device_ctx->host_rescan_work,
		  storvsc_host_rescan_callback);

	host_device_ctx->request_pool =
		kmem_cache_create(dev_name(&device_ctx->device),
				  sizeof(struct storvsc_cmd_request) +
				  storvsc_drv_obj->RequestExtSize, 0,
				  SLAB_HWCACHE_ALIGN, NULL);

	if (!host_device_ctx->request_pool) {
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -ENOMEM;
	}

	device_info.PortNumber = host->host_no;
	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj,
						(void *)&device_info);
	if (ret != 0) {
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	/* host_device_ctx->port = device_info.PortNumber; */
	host_device_ctx->path = device_info.PathId;
	host_device_ctx->target = device_info.TargetId;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, device);
	if (ret != 0) {
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");

		storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	scsi_scan_host(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

/*
 * storvsc_remove - Callback when our device is removed
 */
static int storvsc_remove(struct device *device)
{
	int ret;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx =
		(struct storvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
		&storvsc_drv_ctx->drv_obj;
	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct Scsi_Host *host = dev_get_drvdata(device);
	struct host_device_context *host_device_ctx =
		(struct host_device_context *)host->hostdata;

	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceRemove) {
		DPRINT_EXIT(STORVSC_DRV);
		return -1;
	}

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0) {
		/* TODO: */
		DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)",
			   ret);
	}

	if (host_device_ctx->request_pool) {
		kmem_cache_destroy(host_device_ctx->request_pool);
		host_device_ctx->request_pool = NULL;
	}

	DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
	scsi_remove_host(host);

	DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
	scsi_host_put(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

/*
 * storvsc_commmand_completion - Command completion processing
 */
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
	struct storvsc_cmd_request *cmd_request =
		(struct storvsc_cmd_request *)request->Context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct host_device_context *host_device_ctx =
		(struct host_device_context *)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;

	ASSERT(request == &cmd_request->request);
	ASSERT((unsigned long)scmnd->host_scribble ==
	       (unsigned long)cmd_request);
	ASSERT(scmnd);
	ASSERT(scmnd->scsi_done);

	DPRINT_ENTER(STORVSC_DRV);

	if (cmd_request->bounce_sgl_count) {
		/* using bounce buffer */
		/* printk("copy_from_bounce_buffer\n"); */

		/* FIXME: We can optimize on writes by just skipping this */
		copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));
		destroy_bounce_buffer(cmd_request->bounce_sgl,
				      cmd_request->bounce_sgl_count);
	}

	scmnd->result = request->Status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
					 request->SenseBufferSize, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	ASSERT(request->BytesXfer <= request->DataBuffer.Length);
	scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	/* !!DO NOT MODIFY the scmnd after this call */
	scsi_done_fn(scmnd);

	kmem_cache_free(host_device_ctx->request_pool, cmd_request);

	DPRINT_EXIT(STORVSC_DRV);
}

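/*
 * do_bounce_buffer - Decide whether a scatterlist needs a bounce buffer.
 * Returns the index of the first entry that would leave a hole in the data
 * (an entry that does not start and/or end on a page boundary), or -1 if
 * the list can be handed to the host as-is.
 */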
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}

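/*
 * create_bounce_buffer - Allocate a scatterlist of whole pages big enough to
 * hold 'len' bytes. Pages come from GFP_ATOMIC since this is called on the
 * I/O submission path; on any allocation failure the partial list is freed.
 */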
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;

	num_pages = ALIGN_UP(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}

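/*
 * destroy_bounce_buffer - Free the pages and the scatterlist allocated by
 * create_bounce_buffer(); entries whose page was never allocated are skipped.
 */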
static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

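/*
 * Note on the two copy helpers below: they run with local interrupts
 * disabled and map pages with kmap_atomic(KM_IRQ0), since
 * copy_from_bounce_buffer() can also be reached from the I/O completion
 * path.
 */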
/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
				KM_IRQ0) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		if (bounce_addr == 0)
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

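/*
 * copy_from_bounce_buffer - Copy data from the bounce pages back into the
 * original scatterlist; used on completion and when a request has to be
 * unwound after a failed submission.
 */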
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
				KM_IRQ0) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;
		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		if (bounce_addr == 0)
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
			      KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
				void (*done)(struct scsi_cmnd *))
{
	int ret;
	struct host_device_context *host_device_ctx =
		(struct host_device_context *)scmnd->device->host->hostdata;
	struct device_context *device_ctx = host_device_ctx->device_ctx;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device_ctx->device.driver);
	struct storvsc_driver_context *storvsc_drv_ctx =
		(struct storvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
		&storvsc_drv_ctx->drv_obj;
	struct hv_storvsc_request *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size = 0;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;

	DPRINT_ENTER(STORVSC_DRV);

	DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
		   "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
		   scsi_sg_count(scmnd), scsi_sglist(scmnd),
		   scsi_bufflen(scmnd), scmnd->device->queue_depth,
		   scmnd->device->tagged_supported);

	/* If retrying, no need to prep the cmd */
	if (scmnd->host_scribble) {
		ASSERT(scmnd->scsi_done != NULL);

		cmd_request =
			(struct storvsc_cmd_request *)scmnd->host_scribble;
		DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
			    scmnd, cmd_request);

		goto retry_request;
	}

	ASSERT(scmnd->scsi_done == NULL);
	ASSERT(scmnd->host_scribble == NULL);

	scmnd->scsi_done = done;

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = kmem_cache_alloc(host_device_ctx->request_pool,
				       GFP_ATOMIC);
	if (!cmd_request) {
		DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate "
			   "storvsc_cmd_request...marking queue busy", scmnd);
		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Setup the cmd request */
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	request = &cmd_request->request;

	request->Extension =
		(void *)((unsigned long)cmd_request + request_size);
	DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size,
		   storvsc_drv_obj->RequestExtSize);

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->Type = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		request->Type = READ_TYPE;
		break;
	default:
		request->Type = UNKNOWN_TYPE;
		break;
	}

	request->OnIOCompletion = storvsc_commmand_completion;
	request->Context = cmd_request;/* scmnd; */

	/* request->PortId = scmnd->device->channel; */
	request->Host = host_device_ctx->port;
	request->Bus = scmnd->device->channel;
	request->TargetId = scmnd->device->id;
	request->LunId = scmnd->device->lun;

	ASSERT(scmnd->cmd_len <= 16);
	request->CdbLen = scmnd->cmd_len;
	request->Cdb = scmnd->cmnd;

	request->SenseBuffer = scmnd->sense_buffer;
	request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	request->DataBuffer.Length = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			DPRINT_INFO(STORVSC_DRV,
				    "need to bounce buffer for this scmnd %p",
				    scmnd);
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd));
			if (!cmd_request->bounce_sgl) {
				DPRINT_ERR(STORVSC_DRV,
					   "unable to create bounce buffer for "
					   "this scmnd %p", scmnd);

				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_device_ctx->request_pool,
						cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

			cmd_request->bounce_sgl_count =
				ALIGN_UP(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			/*
			 * FIXME: We can optimize on reads by just skipping
			 * this
			 */
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
					      scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		request->DataBuffer.Offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++) {
			DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
				   i, sgl[i].length, sgl[i].offset);
			request->DataBuffer.PfnArray[i] =
				page_to_pfn(sg_page((&sgl[i])));
		}
	} else if (scsi_sglist(scmnd)) {
		ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE);
		request->DataBuffer.Offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->DataBuffer.PfnArray[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	} else {
		ASSERT(scsi_bufflen(scmnd) == 0);
	}

retry_request:
	/* Invokes the vsc to start an IO */
	ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj,
					   &cmd_request->request);
	if (ret == -1) {
		/* no more space */
		DPRINT_ERR(STORVSC_DRV,
			   "scmnd (%p) - queue FULL...marking queue busy",
			   scmnd);

		if (cmd_request->bounce_sgl_count) {
			/*
			 * FIXME: We can optimize on writes by just skipping
			 * this
			 */
			copy_from_bounce_buffer(scsi_sglist(scmnd),
						cmd_request->bounce_sgl,
						scsi_sg_count(scmnd));
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					      cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_device_ctx->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

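/*
 * storvsc_merge_bvec - merge_bvec_fn hooked up via blk_queue_merge_bvec() in
 * storvsc_device_configure(); it accepts the full bio_vec and leaves any
 * limit checking to the block layer caller.
 */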
static int storvsc_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	/* checking done by caller. */
	return bvec->bv_len;
}

/*
 * storvsc_device_alloc - Called when a new scsi device is attached to the host
 */
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d",
		   sdevice, BLIST_SPARSELUN);
	/*
	 * This enables luns to be located sparsely. Otherwise, we may not
	 * discover them.
	 */
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
	return 0;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice,
		    sdevice->queue_depth);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d",
		    sdevice, STORVSC_MAX_IO_REQUESTS);
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
		    sdevice, PAGE_SIZE);
	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
		    sdevice);
	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
	/* sdevice->timeout = (2000 * HZ);//(75 * HZ); */

	return 0;
}

/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	int ret;
	struct host_device_context *host_device_ctx =
		(struct host_device_context *)scmnd->device->host->hostdata;
	struct device_context *device_ctx = host_device_ctx->device_ctx;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device_ctx->device.driver);
	struct storvsc_driver_context *storvsc_drv_ctx =
		(struct storvsc_driver_context *)driver_ctx;

	struct storvsc_driver_object *storvsc_drv_obj =
		&storvsc_drv_ctx->drv_obj;

	DPRINT_ENTER(STORVSC_DRV);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
		    scmnd->device, &device_ctx->device_obj);

	/* Invokes the vsc to reset the host/bus */
	ASSERT(storvsc_drv_obj->OnHostReset);
	ret = storvsc_drv_obj->OnHostReset(&device_ctx->device_obj);
	if (ret != 0) {
		DPRINT_EXIT(STORVSC_DRV);
		return ret;
	}

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset done",
		    scmnd->device, &device_ctx->device_obj);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

/*
 * storvsc_host_rescan_callback - Rescan the scsi HBA
 */
static void storvsc_host_rescan_callback(struct work_struct *work)
{
	struct hv_device *device_obj =
		&((struct host_device_context *)work)->device_ctx->device_obj;
	struct device_context *device_ctx = to_device_context(device_obj);
	struct Scsi_Host *host = dev_get_drvdata(&device_ctx->device);
	struct scsi_device *sdev;
	struct host_device_context *host_device_ctx;
	struct scsi_device **sdevs_remove_list;
	unsigned int sdevs_count = 0;
	unsigned int found;
	unsigned int i;
	unsigned int lun_count = 0;
	unsigned int *lun_list;

	DPRINT_ENTER(STORVSC_DRV);

	host_device_ctx = (struct host_device_context *)host->hostdata;
	lun_list = kcalloc(STORVSC_MAX_LUNS_PER_TARGET, sizeof(unsigned int),
			   GFP_ATOMIC);
	if (!lun_list) {
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun list");
		return;
	}

	sdevs_remove_list = kcalloc(STORVSC_MAX_LUNS_PER_TARGET,
				    sizeof(void *), GFP_ATOMIC);
	if (!sdevs_remove_list) {
		kfree(lun_list);
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun remove list");
		return;
	}

	DPRINT_INFO(STORVSC_DRV, "rescanning host for new scsi devices...");

	/* Rescan for new device */
	scsi_scan_target(&host->shost_gendev, host_device_ctx->path,
			 host_device_ctx->target, SCAN_WILD_CARD, 1);

	DPRINT_INFO(STORVSC_DRV, "rescanning host for removed scsi device...");

	/* Use the 1st device to send the report luns cmd */
	shost_for_each_device(sdev, host) {
		lun_count = STORVSC_MAX_LUNS_PER_TARGET;
		storvsc_report_luns(sdev, lun_list, &lun_count);

		DPRINT_INFO(STORVSC_DRV,
			    "report luns on scsi device (%p) found %u luns ",
			    sdev, lun_count);
		DPRINT_INFO(STORVSC_DRV,
			    "existing luns on scsi device (%p) host (%d)",
			    sdev, host->host_no);

		scsi_device_put(sdev);
		break;
	}

	for (i = 0; i < lun_count; i++)
		DPRINT_INFO(STORVSC_DRV, "%d) lun %u", i, lun_list[i]);

	/* Rescan for devices that may have been removed.
	 * We do not have to worry that new devices may have been added since
	 * this callback is serialized by the workqueue ie add/remove are done
	 * here.
	 */
	shost_for_each_device(sdev, host) {
		/* See if this device is still here */
		found = 0;
		for (i = 0; i < lun_count; i++) {
			if (sdev->lun == lun_list[i]) {
				found = 1;
				break;
			}
		}
		if (!found) {
			DPRINT_INFO(STORVSC_DRV, "lun (%u) does not exist",
				    sdev->lun);
			sdevs_remove_list[sdevs_count++] = sdev;
		}
	}

	/* Now remove the devices */
	for (i = 0; i < sdevs_count; i++) {
		DPRINT_INFO(STORVSC_DRV,
			    "removing scsi device (%p) lun (%u)...",
			    sdevs_remove_list[i], sdevs_remove_list[i]->lun);

		/* make sure it is not removed from underneath us */
		if (!scsi_device_get(sdevs_remove_list[i])) {
			scsi_remove_device(sdevs_remove_list[i]);
			scsi_device_put(sdevs_remove_list[i]);
		}
	}

	DPRINT_INFO(STORVSC_DRV, "rescan completed on dev obj (%p) "
		    "target (%u) bus (%u)", device_obj,
		    host_device_ctx->target, host_device_ctx->path);

	kfree(lun_list);
	kfree(sdevs_remove_list);

	DPRINT_EXIT(STORVSC_DRV);
}

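/*
 * storvsc_report_luns - Issue REPORT LUNS to the given scsi device and decode
 * the response into luns[]. On entry *lun_count holds the capacity of luns[];
 * on success it is updated to the number of luns found.
 */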
static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[],
			       unsigned int *lun_count)
{
	int i, j;
	unsigned int lun = 0;
	unsigned int num_luns;
	int result;
	unsigned char *data;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16] = {0};
	/* Add 1 to cover the report_lun header */
	unsigned int report_len = 8 * (STORVSC_MAX_LUNS_PER_TARGET+1);
	unsigned long long *report_luns;
	const unsigned int in_lun_count = *lun_count;

	*lun_count = 0;

	report_luns = kzalloc(report_len, GFP_ATOMIC);
	if (!report_luns)
		return -ENOMEM;

	cmd[0] = REPORT_LUNS;

	/* cmd length */
	*(unsigned int *)&cmd[6] = cpu_to_be32(report_len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE,
				  (unsigned char *)report_luns, report_len,
				  &sshdr, 30 * HZ, 3, NULL);
	if (result != 0) {
		kfree(report_luns);
		return -EBUSY;
	}

	/* get the length from the first four bytes */
	report_len = be32_to_cpu(*(unsigned int *)&report_luns[0]);

	num_luns = (report_len / sizeof(unsigned long long));
	if (num_luns > in_lun_count) {
		kfree(report_luns);
		return -EINVAL;
	}

	*lun_count = num_luns;

	DPRINT_DBG(STORVSC_DRV,
		   "report luns on scsi device (%p) found %u luns ",
		   sdev, num_luns);

	/* lun id starts at 1 */
	for (i = 1; i < num_luns + 1; i++) {
		lun = 0;
		data = (unsigned char *)&report_luns[i];
		for (j = 0; j < sizeof(lun); j += 2) {
			lun = lun | (((data[j] << 8) | data[j + 1]) <<
				     (j * 8));
		}

		luns[i-1] = lun;
	}

	kfree(report_luns);
	return 0;
}

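/*
 * storvsc_host_rescan - Entry point called by the vsc driver (via the
 * OnHostRescan callback set in storvsc_drv_init()); it only schedules
 * host_rescan_work since the caller may be in interrupt context.
 */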
static void storvsc_host_rescan(struct hv_device *device_obj)
{
	struct device_context *device_ctx = to_device_context(device_obj);
	struct Scsi_Host *host = dev_get_drvdata(&device_ctx->device);
	struct host_device_context *host_device_ctx;

	DPRINT_ENTER(STORVSC_DRV);

	host_device_ctx = (struct host_device_context *)host->hostdata;

	DPRINT_INFO(STORVSC_DRV, "initiating rescan on dev obj (%p) "
		    "target (%u) bus (%u)...", device_obj,
		    host_device_ctx->target, host_device_ctx->path);

	/*
	 * We need to queue this since the scanning may block and the caller
	 * may be in an intr context
	 */
	/* scsi_queue_work(host, &host_device_ctx->host_rescan_work); */
	schedule_work(&host_device_ctx->host_rescan_work);
	DPRINT_EXIT(STORVSC_DRV);
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t total_sectors = capacity;
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	info[0] = heads;
	info[1] = sectors_per_track;
	info[2] = cylinders;

	DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}

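/*
 * storvsc_init/storvsc_exit - module entry points; they register and
 * unregister the storvsc driver with the vmbus bus via storvsc_drv_init()
 * and storvsc_drv_exit().
 */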
static int __init storvsc_init(void)
{
	int ret;

	DPRINT_ENTER(STORVSC_DRV);
	DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
	ret = storvsc_drv_init(StorVscInitialize);
	DPRINT_EXIT(STORVSC_DRV);
	return ret;
}

static void __exit storvsc_exit(void)
{
	DPRINT_ENTER(STORVSC_DRV);
	storvsc_drv_exit();
	DPRINT_EXIT(STORVSC_DRV);
}

MODULE_LICENSE("GPL");
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
module_init(storvsc_init);
module_exit(storvsc_exit);