2 * Copyright (c) 2009, Microsoft Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 * K. Y. Srinivasan <kys@microsoft.com>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/completion.h>
26 #include <linux/string.h>
27 #include <linux/slab.h>
29 #include <linux/delay.h>
32 #include "hyperv_storage.h"
35 static inline struct storvsc_device
*alloc_stor_device(struct hv_device
*device
)
37 struct storvsc_device
*stor_device
;
39 stor_device
= kzalloc(sizeof(struct storvsc_device
), GFP_KERNEL
);
43 stor_device
->destroy
= false;
44 init_waitqueue_head(&stor_device
->waiting_to_drain
);
45 stor_device
->device
= device
;
46 device
->ext
= stor_device
;
52 static inline struct storvsc_device
*get_in_stor_device(
53 struct hv_device
*device
)
55 struct storvsc_device
*stor_device
;
58 spin_lock_irqsave(&device
->channel
->inbound_lock
, flags
);
59 stor_device
= (struct storvsc_device
*)device
->ext
;
65 * If the device is being destroyed; allow incoming
66 * traffic only to cleanup outstanding requests.
69 if (stor_device
->destroy
&&
70 (atomic_read(&stor_device
->num_outstanding_req
) == 0))
74 spin_unlock_irqrestore(&device
->channel
->inbound_lock
, flags
);
79 static int storvsc_channel_init(struct hv_device
*device
)
81 struct storvsc_device
*stor_device
;
82 struct hv_storvsc_request
*request
;
83 struct vstor_packet
*vstor_packet
;
86 stor_device
= get_out_stor_device(device
);
90 request
= &stor_device
->init_request
;
91 vstor_packet
= &request
->vstor_packet
;
94 * Now, initiate the vsc/vsp initialization protocol on the open
97 memset(request
, 0, sizeof(struct hv_storvsc_request
));
98 init_completion(&request
->wait_event
);
99 vstor_packet
->operation
= VSTOR_OPERATION_BEGIN_INITIALIZATION
;
100 vstor_packet
->flags
= REQUEST_COMPLETION_FLAG
;
102 ret
= vmbus_sendpacket(device
->channel
, vstor_packet
,
103 sizeof(struct vstor_packet
),
104 (unsigned long)request
,
106 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
110 t
= wait_for_completion_timeout(&request
->wait_event
, 5*HZ
);
116 if (vstor_packet
->operation
!= VSTOR_OPERATION_COMPLETE_IO
||
117 vstor_packet
->status
!= 0)
121 /* reuse the packet for version range supported */
122 memset(vstor_packet
, 0, sizeof(struct vstor_packet
));
123 vstor_packet
->operation
= VSTOR_OPERATION_QUERY_PROTOCOL_VERSION
;
124 vstor_packet
->flags
= REQUEST_COMPLETION_FLAG
;
126 vstor_packet
->version
.major_minor
= VMSTOR_PROTOCOL_VERSION_CURRENT
;
127 FILL_VMSTOR_REVISION(vstor_packet
->version
.revision
);
129 ret
= vmbus_sendpacket(device
->channel
, vstor_packet
,
130 sizeof(struct vstor_packet
),
131 (unsigned long)request
,
133 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
137 t
= wait_for_completion_timeout(&request
->wait_event
, 5*HZ
);
143 if (vstor_packet
->operation
!= VSTOR_OPERATION_COMPLETE_IO
||
144 vstor_packet
->status
!= 0)
148 memset(vstor_packet
, 0, sizeof(struct vstor_packet
));
149 vstor_packet
->operation
= VSTOR_OPERATION_QUERY_PROPERTIES
;
150 vstor_packet
->flags
= REQUEST_COMPLETION_FLAG
;
151 vstor_packet
->storage_channel_properties
.port_number
=
152 stor_device
->port_number
;
154 ret
= vmbus_sendpacket(device
->channel
, vstor_packet
,
155 sizeof(struct vstor_packet
),
156 (unsigned long)request
,
158 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
163 t
= wait_for_completion_timeout(&request
->wait_event
, 5*HZ
);
169 if (vstor_packet
->operation
!= VSTOR_OPERATION_COMPLETE_IO
||
170 vstor_packet
->status
!= 0)
173 stor_device
->path_id
= vstor_packet
->storage_channel_properties
.path_id
;
174 stor_device
->target_id
175 = vstor_packet
->storage_channel_properties
.target_id
;
177 memset(vstor_packet
, 0, sizeof(struct vstor_packet
));
178 vstor_packet
->operation
= VSTOR_OPERATION_END_INITIALIZATION
;
179 vstor_packet
->flags
= REQUEST_COMPLETION_FLAG
;
181 ret
= vmbus_sendpacket(device
->channel
, vstor_packet
,
182 sizeof(struct vstor_packet
),
183 (unsigned long)request
,
185 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
190 t
= wait_for_completion_timeout(&request
->wait_event
, 5*HZ
);
196 if (vstor_packet
->operation
!= VSTOR_OPERATION_COMPLETE_IO
||
197 vstor_packet
->status
!= 0)
205 static void storvsc_on_io_completion(struct hv_device
*device
,
206 struct vstor_packet
*vstor_packet
,
207 struct hv_storvsc_request
*request
)
209 struct storvsc_device
*stor_device
;
210 struct vstor_packet
*stor_pkt
;
212 stor_device
= (struct storvsc_device
*)device
->ext
;
214 stor_pkt
= &request
->vstor_packet
;
217 /* Copy over the status...etc */
218 stor_pkt
->vm_srb
.scsi_status
= vstor_packet
->vm_srb
.scsi_status
;
219 stor_pkt
->vm_srb
.srb_status
= vstor_packet
->vm_srb
.srb_status
;
220 stor_pkt
->vm_srb
.sense_info_length
=
221 vstor_packet
->vm_srb
.sense_info_length
;
223 if (vstor_packet
->vm_srb
.scsi_status
!= 0 ||
224 vstor_packet
->vm_srb
.srb_status
!= 1){
226 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
227 stor_pkt
->vm_srb
.cdb
[0],
228 vstor_packet
->vm_srb
.scsi_status
,
229 vstor_packet
->vm_srb
.srb_status
);
232 if ((vstor_packet
->vm_srb
.scsi_status
& 0xFF) == 0x02) {
233 /* CHECK_CONDITION */
234 if (vstor_packet
->vm_srb
.srb_status
& 0x80) {
235 /* autosense data available */
236 DPRINT_WARN(STORVSC
, "storvsc pkt %p autosense data "
237 "valid - len %d\n", request
,
238 vstor_packet
->vm_srb
.sense_info_length
);
240 memcpy(request
->sense_buffer
,
241 vstor_packet
->vm_srb
.sense_data
,
242 vstor_packet
->vm_srb
.sense_info_length
);
247 stor_pkt
->vm_srb
.data_transfer_length
=
248 vstor_packet
->vm_srb
.data_transfer_length
;
250 request
->on_io_completion(request
);
252 if (atomic_dec_and_test(&stor_device
->num_outstanding_req
) &&
253 stor_device
->drain_notify
)
254 wake_up(&stor_device
->waiting_to_drain
);
259 static void storvsc_on_receive(struct hv_device
*device
,
260 struct vstor_packet
*vstor_packet
,
261 struct hv_storvsc_request
*request
)
263 switch (vstor_packet
->operation
) {
264 case VSTOR_OPERATION_COMPLETE_IO
:
265 storvsc_on_io_completion(device
, vstor_packet
, request
);
267 case VSTOR_OPERATION_REMOVE_DEVICE
:
274 static void storvsc_on_channel_callback(void *context
)
276 struct hv_device
*device
= (struct hv_device
*)context
;
277 struct storvsc_device
*stor_device
;
280 unsigned char packet
[ALIGN(sizeof(struct vstor_packet
), 8)];
281 struct hv_storvsc_request
*request
;
285 stor_device
= get_in_stor_device(device
);
290 ret
= vmbus_recvpacket(device
->channel
, packet
,
291 ALIGN(sizeof(struct vstor_packet
), 8),
292 &bytes_recvd
, &request_id
);
293 if (ret
== 0 && bytes_recvd
> 0) {
295 request
= (struct hv_storvsc_request
*)
296 (unsigned long)request_id
;
298 if ((request
== &stor_device
->init_request
) ||
299 (request
== &stor_device
->reset_request
)) {
301 memcpy(&request
->vstor_packet
, packet
,
302 sizeof(struct vstor_packet
));
303 complete(&request
->wait_event
);
305 storvsc_on_receive(device
,
306 (struct vstor_packet
*)packet
,
317 static int storvsc_connect_to_vsp(struct hv_device
*device
, u32 ring_size
)
319 struct vmstorage_channel_properties props
;
322 memset(&props
, 0, sizeof(struct vmstorage_channel_properties
));
324 /* Open the channel */
325 ret
= vmbus_open(device
->channel
,
329 sizeof(struct vmstorage_channel_properties
),
330 storvsc_on_channel_callback
, device
);
335 ret
= storvsc_channel_init(device
);
340 int storvsc_dev_add(struct hv_device
*device
,
341 void *additional_info
)
343 struct storvsc_device
*stor_device
;
344 struct storvsc_device_info
*device_info
;
347 device_info
= (struct storvsc_device_info
*)additional_info
;
348 stor_device
= alloc_stor_device(device
);
352 /* Save the channel properties to our storvsc channel */
355 * If we support more than 1 scsi channel, we need to set the
356 * port number here to the scsi channel but how do we get the
357 * scsi channel prior to the bus scan.
359 * The host does not support this.
362 stor_device
->port_number
= device_info
->port_number
;
363 /* Send it back up */
364 ret
= storvsc_connect_to_vsp(device
, device_info
->ring_buffer_size
);
369 device_info
->path_id
= stor_device
->path_id
;
370 device_info
->target_id
= stor_device
->target_id
;
375 int storvsc_dev_remove(struct hv_device
*device
)
377 struct storvsc_device
*stor_device
;
380 stor_device
= (struct storvsc_device
*)device
->ext
;
382 spin_lock_irqsave(&device
->channel
->inbound_lock
, flags
);
383 stor_device
->destroy
= true;
384 spin_unlock_irqrestore(&device
->channel
->inbound_lock
, flags
);
387 * At this point, all outbound traffic should be disable. We
388 * only allow inbound traffic (responses) to proceed so that
389 * outstanding requests can be completed.
392 storvsc_wait_to_drain(stor_device
);
395 * Since we have already drained, we don't need to busy wait
396 * as was done in final_release_stor_device()
397 * Note that we cannot set the ext pointer to NULL until
398 * we have drained - to drain the outgoing packets, we need to
399 * allow incoming packets.
401 spin_lock_irqsave(&device
->channel
->inbound_lock
, flags
);
403 spin_unlock_irqrestore(&device
->channel
->inbound_lock
, flags
);
405 /* Close the channel */
406 vmbus_close(device
->channel
);
412 int storvsc_do_io(struct hv_device
*device
,
413 struct hv_storvsc_request
*request
)
415 struct storvsc_device
*stor_device
;
416 struct vstor_packet
*vstor_packet
;
419 vstor_packet
= &request
->vstor_packet
;
420 stor_device
= get_out_stor_device(device
);
426 request
->device
= device
;
429 vstor_packet
->flags
|= REQUEST_COMPLETION_FLAG
;
431 vstor_packet
->vm_srb
.length
= sizeof(struct vmscsi_request
);
434 vstor_packet
->vm_srb
.sense_info_length
= SENSE_BUFFER_SIZE
;
437 vstor_packet
->vm_srb
.data_transfer_length
=
438 request
->data_buffer
.len
;
440 vstor_packet
->operation
= VSTOR_OPERATION_EXECUTE_SRB
;
442 if (request
->data_buffer
.len
) {
443 ret
= vmbus_sendpacket_multipagebuffer(device
->channel
,
444 &request
->data_buffer
,
446 sizeof(struct vstor_packet
),
447 (unsigned long)request
);
449 ret
= vmbus_sendpacket(device
->channel
, vstor_packet
,
450 sizeof(struct vstor_packet
),
451 (unsigned long)request
,
453 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
459 atomic_inc(&stor_device
->num_outstanding_req
);
465 * The channel properties uniquely specify how the device is to be
466 * presented to the guest. Map this information for use by the block
467 * driver. For Linux guests on Hyper-V, we emulate a scsi HBA in the guest
468 * (storvsc_drv) and so scsi devices in the guest are handled by
469 * native upper level Linux drivers. Consequently, Hyper-V
470 * block driver, while being a generic block driver, presently does not
471 * deal with anything other than devices that would need to be presented
472 * to the guest as an IDE disk.
474 * This function maps the channel properties as embedded in the input
475 * parameter device_info onto information necessary to register the
476 * corresponding block device.
478 * Currently, there is no way to stop the emulation of the block device
479 * on the host side. And so, to prevent the native IDE drivers in Linux
480 * from taking over these devices (to be managed by Hyper-V block
481 * driver), we will take over if need be the major of the IDE controllers.
485 int storvsc_get_major_info(struct storvsc_device_info
*device_info
,
486 struct storvsc_major_info
*major_info
)
488 static bool ide0_registered
;
489 static bool ide1_registered
;
492 * For now we only support IDE disks.
494 major_info
->devname
= "ide";
495 major_info
->diskname
= "hd";
497 if (device_info
->path_id
) {
498 major_info
->major
= 22;
499 if (!ide1_registered
) {
500 major_info
->do_register
= true;
501 ide1_registered
= true;
503 major_info
->do_register
= false;
505 if (device_info
->target_id
)
506 major_info
->index
= 3;
508 major_info
->index
= 2;
512 major_info
->major
= 3;
513 if (!ide0_registered
) {
514 major_info
->do_register
= true;
515 ide0_registered
= true;
517 major_info
->do_register
= false;
519 if (device_info
->target_id
)
520 major_info
->index
= 1;
522 major_info
->index
= 0;