3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
20 /* if you want to turn on some debugging of write device data or read
21 * device data, define these two undefs. You will probably want to
22 * customize the code which is here since it was written assuming
23 * reading and writing a specific data file df.64M.txt which is a
24 * 64Megabyte file created by Art Nilson using a script I wrote called
25 * cr_test_data.pl. The data file consists of 256 byte lines of text
26 * which start with an 8 digit sequence number, a colon, and then
27 * letters after that */
31 #include <linux/kernel.h>
32 #ifdef CONFIG_MODVERSIONS
33 #include <config/modversions.h>
37 #include "diagnostics/appos_subsystems.h"
40 #include "uisthread.h"
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/pci.h>
45 #include <linux/spinlock.h>
46 #include <linux/device.h>
47 #include <linux/slab.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_host.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <asm/param.h>
53 #include <linux/debugfs.h>
54 #include <linux/types.h>
58 #include "visorchipset.h"
60 #include "guestlinuxdebug.h"
61 /* this is shorter than using __FILE__ (full path name) in
62 * debug/info/error messages
64 #define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
65 #define __MYFILE__ "virthba.c"
67 /* NOTE: L1_CACHE_BYTES >=128 */
68 #define DEVICE_ATTRIBUTE struct device_attribute
70 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
71 * = 4800 bytes ~ 2^13 = 8192 bytes
75 /*****************************************************/
76 /* Forward declarations */
77 /*****************************************************/
78 static int virthba_probe(struct virtpci_dev
*dev
,
79 const struct pci_device_id
*id
);
80 static void virthba_remove(struct virtpci_dev
*dev
);
81 static int virthba_abort_handler(struct scsi_cmnd
*scsicmd
);
82 static int virthba_bus_reset_handler(struct scsi_cmnd
*scsicmd
);
83 static int virthba_device_reset_handler(struct scsi_cmnd
*scsicmd
);
84 static int virthba_host_reset_handler(struct scsi_cmnd
*scsicmd
);
85 static const char *virthba_get_info(struct Scsi_Host
*shp
);
86 static int virthba_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
);
87 static int virthba_queue_command_lck(struct scsi_cmnd
*scsicmd
,
88 void (*virthba_cmnd_done
)(struct scsi_cmnd
*));
90 static const struct x86_cpu_id unisys_spar_ids
[] = {
91 { X86_VENDOR_INTEL
, 6, 62, X86_FEATURE_ANY
},
96 MODULE_DEVICE_TABLE(x86cpu
, unisys_spar_ids
);
99 static DEF_SCSI_QCMD(virthba_queue_command
)
101 #define virthba_queue_command virthba_queue_command_lck
105 static int virthba_slave_alloc(struct scsi_device
*scsidev
);
106 static int virthba_slave_configure(struct scsi_device
*scsidev
);
107 static void virthba_slave_destroy(struct scsi_device
*scsidev
);
108 static int process_incoming_rsps(void *);
109 static int virthba_serverup(struct virtpci_dev
*virtpcidev
);
110 static int virthba_serverdown(struct virtpci_dev
*virtpcidev
, u32 state
);
111 static void doDiskAddRemove(struct work_struct
*work
);
112 static void virthba_serverdown_complete(struct work_struct
*work
);
113 static ssize_t
info_debugfs_read(struct file
*file
, char __user
*buf
,
114 size_t len
, loff_t
*offset
);
115 static ssize_t
enable_ints_write(struct file
*file
,
116 const char __user
*buffer
, size_t count
, loff_t
*ppos
);
118 /*****************************************************/
120 /*****************************************************/
122 static int rsltq_wait_usecs
= 4000; /* Default 4ms */
123 static unsigned int MaxBuffLen
;
126 static char *virthba_options
= "NONE";
128 static const struct pci_device_id virthba_id_table
[] = {
129 {PCI_DEVICE(PCI_VENDOR_ID_UNISYS
, PCI_DEVICE_ID_VIRTHBA
)},
133 /* export virthba_id_table */
134 MODULE_DEVICE_TABLE(pci
, virthba_id_table
);
136 static struct workqueue_struct
*virthba_serverdown_workqueue
;
138 static struct virtpci_driver virthba_driver
= {
139 .name
= "uisvirthba",
142 .id_table
= virthba_id_table
,
143 .probe
= virthba_probe
,
144 .remove
= virthba_remove
,
145 .resume
= virthba_serverup
,
146 .suspend
= virthba_serverdown
149 /* The Send and Receive Buffers of the IO Queue may both be full */
150 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
151 #define INTERRUPT_VECTOR_MASK 0x3F
154 char cmdtype
; /* Type of pointer that is being stored */
155 void *sent
; /* The Data being tracked */
156 /* struct scsi_cmnd *type for virthba_queue_command */
157 /* struct uiscmdrsp *type for management commands */
160 #define VIRTHBA_ERROR_COUNT 30
161 #define IOS_ERROR_THRESHOLD 1000
162 struct virtdisk_info
{
164 u32 channel
, id
, lun
; /* Disk Path */
165 atomic_t ios_threshold
;
166 atomic_t error_count
;
167 struct virtdisk_info
*next
;
169 /* Each Scsi_Host has a host_data area that contains this struct. */
170 struct virthba_info
{
171 struct Scsi_Host
*scsihost
;
172 struct virtpci_dev
*virtpcidev
;
173 struct list_head dev_info_list
;
174 struct chaninfo chinfo
;
175 struct irq_info intr
; /* use recvInterrupt info to receive
176 interrupts when IOs complete */
177 int interrupt_vector
;
178 struct scsipending pending
[MAX_PENDING_REQUESTS
]; /* Tracks the requests
180 /* forwarded to the IOVM and haven't returned yet */
181 unsigned int nextinsert
; /* Start search for next pending
185 bool serverchangingstate
;
186 unsigned long long acquire_failed_cnt
;
187 unsigned long long interrupts_rcvd
;
188 unsigned long long interrupts_notme
;
189 unsigned long long interrupts_disabled
;
190 struct work_struct serverdown_completion
;
191 u64 __iomem
*flags_addr
;
192 atomic_t interrupt_rcvd
;
193 wait_queue_head_t rsp_queue
;
194 struct virtdisk_info head
;
197 /* Work Data for DARWorkQ */
198 struct diskaddremove
{
199 u8 add
; /* 0-remove, 1-add */
200 struct Scsi_Host
*shost
; /* Scsi Host for this virthba instance */
201 u32 channel
, id
, lun
; /* Disk Path */
202 struct diskaddremove
*next
;
205 #define virtpci_dev_to_virthba_virthba_get_info(d) \
206 container_of(d, struct virthba_info, virtpcidev)
208 static DEVICE_ATTRIBUTE
*virthba_shost_attrs
[];
209 static struct scsi_host_template virthba_driver_template
= {
210 .name
= "Unisys Virtual HBA",
211 .info
= virthba_get_info
,
212 .ioctl
= virthba_ioctl
,
213 .queuecommand
= virthba_queue_command
,
214 .eh_abort_handler
= virthba_abort_handler
,
215 .eh_device_reset_handler
= virthba_device_reset_handler
,
216 .eh_bus_reset_handler
= virthba_bus_reset_handler
,
217 .eh_host_reset_handler
= virthba_host_reset_handler
,
218 .shost_attrs
= virthba_shost_attrs
,
220 #define VIRTHBA_MAX_CMNDS 128
221 .can_queue
= VIRTHBA_MAX_CMNDS
,
222 .sg_tablesize
= 64, /* largest number of address/length pairs */
224 .slave_alloc
= virthba_slave_alloc
,
225 .slave_configure
= virthba_slave_configure
,
226 .slave_destroy
= virthba_slave_destroy
,
227 .use_clustering
= ENABLE_CLUSTERING
,
230 struct virthba_devices_open
{
231 struct virthba_info
*virthbainfo
;
234 static const struct file_operations debugfs_info_fops
= {
235 .read
= info_debugfs_read
,
238 static const struct file_operations debugfs_enable_ints_fops
= {
239 .write
= enable_ints_write
,
242 /*****************************************************/
244 /*****************************************************/
246 #define VIRTHBASOPENMAX 1
247 /* array of open devices maintained by open() and close(); */
248 static struct virthba_devices_open VirtHbasOpen
[VIRTHBASOPENMAX
];
249 static struct dentry
*virthba_debugfs_dir
;
251 /*****************************************************/
252 /* Local Functions */
253 /*****************************************************/
255 add_scsipending_entry(struct virthba_info
*vhbainfo
, char cmdtype
, void *new)
260 spin_lock_irqsave(&vhbainfo
->privlock
, flags
);
261 insert_location
= vhbainfo
->nextinsert
;
262 while (vhbainfo
->pending
[insert_location
].sent
!= NULL
) {
263 insert_location
= (insert_location
+ 1) % MAX_PENDING_REQUESTS
;
264 if (insert_location
== (int) vhbainfo
->nextinsert
) {
265 LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n",
267 spin_unlock_irqrestore(&vhbainfo
->privlock
, flags
);
272 vhbainfo
->pending
[insert_location
].cmdtype
= cmdtype
;
273 vhbainfo
->pending
[insert_location
].sent
= new;
274 vhbainfo
->nextinsert
= (insert_location
+ 1) % MAX_PENDING_REQUESTS
;
275 spin_unlock_irqrestore(&vhbainfo
->privlock
, flags
);
277 return insert_location
;
281 add_scsipending_entry_with_wait(struct virthba_info
*vhbainfo
, char cmdtype
,
284 int insert_location
= add_scsipending_entry(vhbainfo
, cmdtype
, new);
286 while (insert_location
== -1) {
287 LOGERR("Failed to find empty queue slot. Waiting to try again\n");
288 set_current_state(TASK_INTERRUPTIBLE
);
289 schedule_timeout(msecs_to_jiffies(10));
290 insert_location
= add_scsipending_entry(vhbainfo
, cmdtype
, new);
293 return (unsigned int) insert_location
;
297 del_scsipending_entry(struct virthba_info
*vhbainfo
, uintptr_t del
)
302 if (del
>= MAX_PENDING_REQUESTS
) {
303 LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
304 (unsigned long) del
, MAX_PENDING_REQUESTS
);
306 spin_lock_irqsave(&vhbainfo
->privlock
, flags
);
308 if (vhbainfo
->pending
[del
].sent
== NULL
)
309 LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
310 (unsigned long) del
);
312 sent
= vhbainfo
->pending
[del
].sent
;
314 vhbainfo
->pending
[del
].cmdtype
= 0;
315 vhbainfo
->pending
[del
].sent
= NULL
;
316 spin_unlock_irqrestore(&vhbainfo
->privlock
, flags
);
322 /* DARWorkQ (Disk Add/Remove) */
323 static struct work_struct DARWorkQ
;
324 static struct diskaddremove
*DARWorkQHead
;
325 static spinlock_t DARWorkQLock
;
326 static unsigned short DARWorkQSched
;
327 #define QUEUE_DISKADDREMOVE(dar) { \
328 spin_lock_irqsave(&DARWorkQLock, flags); \
329 if (!DARWorkQHead) { \
330 DARWorkQHead = dar; \
334 dar->next = DARWorkQHead; \
335 DARWorkQHead = dar; \
337 if (!DARWorkQSched) { \
338 schedule_work(&DARWorkQ); \
341 spin_unlock_irqrestore(&DARWorkQLock, flags); \
345 SendDiskAddRemove(struct diskaddremove
*dar
)
347 struct scsi_device
*sdev
;
350 sdev
= scsi_device_lookup(dar
->shost
, dar
->channel
, dar
->id
, dar
->lun
);
353 scsi_remove_device(sdev
);
354 } else if (dar
->add
) {
356 scsi_add_device(dar
->shost
, dar
->channel
, dar
->id
,
359 LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n",
360 dar
->shost
->host_no
, dar
->channel
, dar
->id
,
363 LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n",
364 dar
->channel
, dar
->id
, dar
->lun
);
368 /*****************************************************/
369 /* DARWorkQ Handler Thread */
370 /*****************************************************/
372 doDiskAddRemove(struct work_struct
*work
)
374 struct diskaddremove
*dar
;
375 struct diskaddremove
*tmphead
;
379 spin_lock_irqsave(&DARWorkQLock
, flags
);
380 tmphead
= DARWorkQHead
;
383 spin_unlock_irqrestore(&DARWorkQLock
, flags
);
387 SendDiskAddRemove(dar
);
392 /*****************************************************/
393 /* Routine to add entry to DARWorkQ */
394 /*****************************************************/
396 process_disk_notify(struct Scsi_Host
*shost
, struct uiscmdrsp
*cmdrsp
)
398 struct diskaddremove
*dar
;
401 dar
= kzalloc(sizeof(struct diskaddremove
), GFP_ATOMIC
);
403 dar
->add
= cmdrsp
->disknotify
.add
;
405 dar
->channel
= cmdrsp
->disknotify
.channel
;
406 dar
->id
= cmdrsp
->disknotify
.id
;
407 dar
->lun
= cmdrsp
->disknotify
.lun
;
408 QUEUE_DISKADDREMOVE(dar
);
410 LOGERR("kmalloc failed for dar. host_no=%d[chan=%d:id=%d:lun=%d]\n",
411 shost
->host_no
, cmdrsp
->disknotify
.channel
,
412 cmdrsp
->disknotify
.id
, cmdrsp
->disknotify
.lun
);
416 /*****************************************************/
417 /* Probe Remove Functions */
418 /*****************************************************/
420 virthba_ISR(int irq
, void *dev_id
)
422 struct virthba_info
*virthbainfo
= (struct virthba_info
*) dev_id
;
423 struct channel_header __iomem
*pChannelHeader
;
424 struct signal_queue_header __iomem
*pqhdr
;
426 unsigned long long rc1
;
428 if (virthbainfo
== NULL
)
430 virthbainfo
->interrupts_rcvd
++;
431 pChannelHeader
= virthbainfo
->chinfo
.queueinfo
->chan
;
432 if (((readq(&pChannelHeader
->features
)
433 & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS
) != 0)
434 && ((readq(&pChannelHeader
->features
) &
435 ULTRA_IO_DRIVER_DISABLES_INTS
) !=
437 virthbainfo
->interrupts_disabled
++;
438 mask
= ~ULTRA_CHANNEL_ENABLE_INTS
;
439 rc1
= uisqueue_interlocked_and(virthbainfo
->flags_addr
, mask
);
441 if (spar_signalqueue_empty(pChannelHeader
, IOCHAN_FROM_IOPART
)) {
442 virthbainfo
->interrupts_notme
++;
445 pqhdr
= (struct signal_queue_header __iomem
*)
446 ((char __iomem
*) pChannelHeader
+
447 readq(&pChannelHeader
->ch_space_offset
)) + IOCHAN_FROM_IOPART
;
448 writeq(readq(&pqhdr
->num_irq_received
) + 1,
449 &pqhdr
->num_irq_received
);
450 atomic_set(&virthbainfo
->interrupt_rcvd
, 1);
451 wake_up_interruptible(&virthbainfo
->rsp_queue
);
456 virthba_probe(struct virtpci_dev
*virtpcidev
, const struct pci_device_id
*id
)
459 struct Scsi_Host
*scsihost
;
460 struct virthba_info
*virthbainfo
;
463 irq_handler_t handler
= virthba_ISR
;
464 struct channel_header __iomem
*pChannelHeader
;
465 struct signal_queue_header __iomem
*pqhdr
;
468 LOGVER("entering virthba_probe...\n");
469 LOGVER("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
470 virtpcidev
->device_no
);
472 LOGINF("entering virthba_probe...\n");
473 LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
474 virtpcidev
->device_no
);
475 POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
476 /* call scsi_host_alloc to register a scsi host adapter
477 * instance - this virthba that has just been created is an
478 * instance of a scsi host adapter. This scsi_host_alloc
479 * function allocates a new Scsi_Host struct & performs basic
480 * initialization. The host is not published to the scsi
481 * midlayer until scsi_add_host is called.
483 DBGINF("calling scsi_host_alloc.\n");
485 /* arg 2 passed in length of extra space we want allocated
486 * with scsi_host struct for our own use scsi_host_alloc
489 scsihost
= scsi_host_alloc(&virthba_driver_template
,
490 sizeof(struct virthba_info
));
491 if (scsihost
== NULL
)
494 DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n",
495 scsihost
, scsihost
->this_id
, scsihost
->host_no
);
497 scsihost
->this_id
= UIS_MAGIC_VHBA
;
498 /* linux treats max-channel differently than max-id & max-lun.
499 * In the latter cases, those two values result in 0 to max-1
500 * (inclusive) being scanned. But in the case of channels, the
501 * scan is 0 to max (inclusive); so we will subtract one from
502 * the max-channel value.
504 LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
505 (unsigned) virtpcidev
->scsi
.max
.max_channel
- 1,
506 (unsigned) virtpcidev
->scsi
.max
.max_id
,
507 (unsigned) virtpcidev
->scsi
.max
.max_lun
,
508 (unsigned) virtpcidev
->scsi
.max
.cmd_per_lun
,
509 (unsigned) virtpcidev
->scsi
.max
.max_io_size
);
510 scsihost
->max_channel
= (unsigned) virtpcidev
->scsi
.max
.max_channel
;
511 scsihost
->max_id
= (unsigned) virtpcidev
->scsi
.max
.max_id
;
512 scsihost
->max_lun
= (unsigned) virtpcidev
->scsi
.max
.max_lun
;
513 scsihost
->cmd_per_lun
= (unsigned) virtpcidev
->scsi
.max
.cmd_per_lun
;
514 scsihost
->max_sectors
=
515 (unsigned short) (virtpcidev
->scsi
.max
.max_io_size
>> 9);
516 scsihost
->sg_tablesize
=
517 (unsigned short) (virtpcidev
->scsi
.max
.max_io_size
/ PAGE_SIZE
);
518 if (scsihost
->sg_tablesize
> MAX_PHYS_INFO
)
519 scsihost
->sg_tablesize
= MAX_PHYS_INFO
;
520 LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%llu, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
521 scsihost
->max_channel
, scsihost
->max_id
, scsihost
->max_lun
,
522 scsihost
->cmd_per_lun
, scsihost
->max_sectors
,
523 scsihost
->sg_tablesize
);
524 LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
525 scsihost
->can_queue
, scsihost
->cmd_per_lun
, scsihost
->max_sectors
,
526 scsihost
->sg_tablesize
);
528 DBGINF("calling scsi_add_host\n");
530 /* this creates "host%d" in sysfs. If 2nd argument is NULL,
531 * then this generic /sys/devices/platform/host? device is
532 * created and /sys/scsi_host/host? ->
533 * /sys/devices/platform/host? If 2nd argument is not NULL,
534 * then this generic /sys/devices/<path>/host? is created and
535 * host? points to that device instead.
537 error
= scsi_add_host(scsihost
, &virtpcidev
->generic_dev
);
539 LOGERR("scsi_add_host ****FAILED 0x%x TBD - RECOVER\n", error
);
540 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC
, POSTCODE_SEVERITY_ERR
);
541 /* decr refcount on scsihost which was incremented by
542 * scsi_add_host so the scsi_host gets deleted
544 scsi_host_put(scsihost
);
548 virthbainfo
= (struct virthba_info
*) scsihost
->hostdata
;
549 memset(virthbainfo
, 0, sizeof(struct virthba_info
));
550 for (i
= 0; i
< VIRTHBASOPENMAX
; i
++) {
551 if (VirtHbasOpen
[i
].virthbainfo
== NULL
) {
552 VirtHbasOpen
[i
].virthbainfo
= virthbainfo
;
556 virthbainfo
->interrupt_vector
= -1;
557 virthbainfo
->chinfo
.queueinfo
= &virtpcidev
->queueinfo
;
558 virthbainfo
->virtpcidev
= virtpcidev
;
559 spin_lock_init(&virthbainfo
->chinfo
.insertlock
);
561 DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n",
562 &virtpcidev
->generic_dev
, &virtpcidev
->queueinfo
);
564 init_waitqueue_head(&virthbainfo
->rsp_queue
);
565 spin_lock_init(&virthbainfo
->privlock
);
566 memset(&virthbainfo
->pending
, 0, sizeof(virthbainfo
->pending
));
567 virthbainfo
->serverdown
= false;
568 virthbainfo
->serverchangingstate
= false;
570 virthbainfo
->intr
= virtpcidev
->intr
;
571 /* save of host within virthba_info */
572 virthbainfo
->scsihost
= scsihost
;
574 /* save of host within virtpci_dev */
575 virtpcidev
->scsi
.scsihost
= scsihost
;
577 /* Setup workqueue for serverdown messages */
578 INIT_WORK(&virthbainfo
->serverdown_completion
,
579 virthba_serverdown_complete
);
581 writeq(readq(&virthbainfo
->chinfo
.queueinfo
->chan
->features
) |
582 ULTRA_IO_CHANNEL_IS_POLLING
,
583 &virthbainfo
->chinfo
.queueinfo
->chan
->features
);
584 /* start thread that will receive scsicmnd responses */
585 DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
586 virthbainfo
->chinfo
.queueinfo
, &virthbainfo
->chinfo
.threadinfo
);
588 pChannelHeader
= virthbainfo
->chinfo
.queueinfo
->chan
;
589 pqhdr
= (struct signal_queue_header __iomem
*)
590 ((char __iomem
*)pChannelHeader
+
591 readq(&pChannelHeader
->ch_space_offset
)) + IOCHAN_FROM_IOPART
;
592 virthbainfo
->flags_addr
= &pqhdr
->features
;
594 if (!uisthread_start(&virthbainfo
->chinfo
.threadinfo
,
595 process_incoming_rsps
,
596 virthbainfo
, "vhba_incoming")) {
597 LOGERR("uisthread_start rsp ****FAILED\n");
598 /* decr refcount on scsihost which was incremented by
599 * scsi_add_host so the scsi_host gets deleted
601 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC
, POSTCODE_SEVERITY_ERR
);
602 scsi_host_put(scsihost
);
605 LOGINF("sendInterruptHandle=0x%16llX",
606 virthbainfo
->intr
.send_irq_handle
);
607 LOGINF("recvInterruptHandle=0x%16llX",
608 virthbainfo
->intr
.recv_irq_handle
);
609 LOGINF("recvInterruptVector=0x%8X",
610 virthbainfo
->intr
.recv_irq_vector
);
611 LOGINF("recvInterruptShared=0x%2X",
612 virthbainfo
->intr
.recv_irq_shared
);
613 LOGINF("scsihost.hostt->name=%s", scsihost
->hostt
->name
);
614 virthbainfo
->interrupt_vector
=
615 virthbainfo
->intr
.recv_irq_handle
& INTERRUPT_VECTOR_MASK
;
616 rsp
= request_irq(virthbainfo
->interrupt_vector
, handler
, IRQF_SHARED
,
617 scsihost
->hostt
->name
, virthbainfo
);
619 LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n",
620 virthbainfo
->interrupt_vector
, rsp
);
621 virthbainfo
->interrupt_vector
= -1;
622 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC
, POSTCODE_SEVERITY_ERR
);
624 u64 __iomem
*Features_addr
=
625 &virthbainfo
->chinfo
.queueinfo
->chan
->features
;
626 LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
627 virthbainfo
->interrupt_vector
);
628 mask
= ~(ULTRA_IO_CHANNEL_IS_POLLING
|
629 ULTRA_IO_DRIVER_DISABLES_INTS
);
630 uisqueue_interlocked_and(Features_addr
, mask
);
631 mask
= ULTRA_IO_DRIVER_ENABLES_INTS
;
632 uisqueue_interlocked_or(Features_addr
, mask
);
633 rsltq_wait_usecs
= 4000000;
636 DBGINF("calling scsi_scan_host.\n");
637 scsi_scan_host(scsihost
);
638 DBGINF("return from scsi_scan_host.\n");
640 LOGINF("virthba added scsihost:0x%p\n", scsihost
);
641 POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
646 virthba_remove(struct virtpci_dev
*virtpcidev
)
648 struct virthba_info
*virthbainfo
;
649 struct Scsi_Host
*scsihost
=
650 (struct Scsi_Host
*) virtpcidev
->scsi
.scsihost
;
652 LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
653 virtpcidev
->device_no
);
654 virthbainfo
= (struct virthba_info
*) scsihost
->hostdata
;
655 if (virthbainfo
->interrupt_vector
!= -1)
656 free_irq(virthbainfo
->interrupt_vector
, virthbainfo
);
657 LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev
,
660 DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost
,
662 scsi_remove_host(scsihost
);
664 DBGINF("stopping thread.\n");
665 uisthread_stop(&virthbainfo
->chinfo
.threadinfo
);
667 DBGINF("calling scsi_host_put\n");
669 /* decr refcount on scsihost which was incremented by
670 * scsi_add_host so the scsi_host gets deleted
672 scsi_host_put(scsihost
);
673 LOGINF("virthba removed scsi_host.\n");
677 forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype
,
678 struct Scsi_Host
*scsihost
,
679 struct uisscsi_dest
*vdest
)
681 struct uiscmdrsp
*cmdrsp
;
682 struct virthba_info
*virthbainfo
=
683 (struct virthba_info
*) scsihost
->hostdata
;
684 int notifyresult
= 0xffff;
685 wait_queue_head_t notifyevent
;
687 LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype
,
688 vdest
->channel
, vdest
->id
, vdest
->lun
);
690 if (virthbainfo
->serverdown
|| virthbainfo
->serverchangingstate
) {
691 DBGINF("Server is down/changing state. Returning Failure.\n");
695 cmdrsp
= kzalloc(SIZEOF_CMDRSP
, GFP_ATOMIC
);
696 if (cmdrsp
== NULL
) {
697 LOGERR("kmalloc of cmdrsp failed.\n");
698 return FAILED
; /* reject */
701 init_waitqueue_head(¬ifyevent
);
703 /* issue VDISK_MGMT_CMD
704 * set type to command - as opposed to task mgmt
706 cmdrsp
->cmdtype
= CMD_VDISKMGMT_TYPE
;
707 /* specify the event that has to be triggered when this cmd is
710 cmdrsp
->vdiskmgmt
.notify
= (void *) ¬ifyevent
;
711 cmdrsp
->vdiskmgmt
.notifyresult
= (void *) ¬ifyresult
;
713 /* save destination */
714 cmdrsp
->vdiskmgmt
.vdisktype
= vdiskcmdtype
;
715 cmdrsp
->vdiskmgmt
.vdest
.channel
= vdest
->channel
;
716 cmdrsp
->vdiskmgmt
.vdest
.id
= vdest
->id
;
717 cmdrsp
->vdiskmgmt
.vdest
.lun
= vdest
->lun
;
718 cmdrsp
->vdiskmgmt
.scsicmd
=
720 add_scsipending_entry_with_wait(virthbainfo
, CMD_VDISKMGMT_TYPE
,
723 uisqueue_put_cmdrsp_with_lock_client(virthbainfo
->chinfo
.queueinfo
,
724 cmdrsp
, IOCHAN_TO_IOPART
,
725 &virthbainfo
->chinfo
.insertlock
,
726 DONT_ISSUE_INTERRUPT
, (u64
) NULL
,
728 LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
729 cmdrsp
->scsitaskmgmt
.notify
);
730 wait_event(notifyevent
, notifyresult
!= 0xffff);
731 LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp
->vdiskmgmt
.result
);
736 /*****************************************************/
737 /* Scsi Host support functions */
738 /*****************************************************/
741 forward_taskmgmt_command(enum task_mgmt_types tasktype
,
742 struct scsi_device
*scsidev
)
744 struct uiscmdrsp
*cmdrsp
;
745 struct virthba_info
*virthbainfo
=
746 (struct virthba_info
*) scsidev
->host
->hostdata
;
747 int notifyresult
= 0xffff;
748 wait_queue_head_t notifyevent
;
750 LOGINF("TaskMgmt:%d %d:%d:%llu\n", tasktype
,
751 scsidev
->channel
, scsidev
->id
, scsidev
->lun
);
753 if (virthbainfo
->serverdown
|| virthbainfo
->serverchangingstate
) {
754 DBGINF("Server is down/changing state. Returning Failure.\n");
758 cmdrsp
= kzalloc(SIZEOF_CMDRSP
, GFP_ATOMIC
);
759 if (cmdrsp
== NULL
) {
760 LOGERR("kmalloc of cmdrsp failed.\n");
761 return FAILED
; /* reject */
764 init_waitqueue_head(¬ifyevent
);
766 /* issue TASK_MGMT_ABORT_TASK */
767 /* set type to command - as opposed to task mgmt */
768 cmdrsp
->cmdtype
= CMD_SCSITASKMGMT_TYPE
;
769 /* specify the event that has to be triggered when this */
770 /* cmd is complete */
771 cmdrsp
->scsitaskmgmt
.notify
= (void *) ¬ifyevent
;
772 cmdrsp
->scsitaskmgmt
.notifyresult
= (void *) ¬ifyresult
;
774 /* save destination */
775 cmdrsp
->scsitaskmgmt
.tasktype
= tasktype
;
776 cmdrsp
->scsitaskmgmt
.vdest
.channel
= scsidev
->channel
;
777 cmdrsp
->scsitaskmgmt
.vdest
.id
= scsidev
->id
;
778 cmdrsp
->scsitaskmgmt
.vdest
.lun
= scsidev
->lun
;
779 cmdrsp
->scsitaskmgmt
.scsicmd
=
781 add_scsipending_entry_with_wait(virthbainfo
,
782 CMD_SCSITASKMGMT_TYPE
,
785 uisqueue_put_cmdrsp_with_lock_client(virthbainfo
->chinfo
.queueinfo
,
786 cmdrsp
, IOCHAN_TO_IOPART
,
787 &virthbainfo
->chinfo
.insertlock
,
788 DONT_ISSUE_INTERRUPT
, (u64
) NULL
,
790 LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
791 cmdrsp
->scsitaskmgmt
.notify
);
792 wait_event(notifyevent
, notifyresult
!= 0xffff);
793 LOGINF("TaskMgmt complete; result:%d\n", cmdrsp
->scsitaskmgmt
.result
);
798 /* The abort handler returns SUCCESS if it has succeeded to make LLDD
799 * and all related hardware forget about the scmd.
802 virthba_abort_handler(struct scsi_cmnd
*scsicmd
)
804 /* issue TASK_MGMT_ABORT_TASK */
805 struct scsi_device
*scsidev
;
806 struct virtdisk_info
*vdisk
;
808 scsidev
= scsicmd
->device
;
809 for (vdisk
= &((struct virthba_info
*) scsidev
->host
->hostdata
)->head
;
810 vdisk
->next
; vdisk
= vdisk
->next
) {
811 if ((scsidev
->channel
== vdisk
->channel
)
812 && (scsidev
->id
== vdisk
->id
)
813 && (scsidev
->lun
== vdisk
->lun
)) {
814 if (atomic_read(&vdisk
->error_count
) <
815 VIRTHBA_ERROR_COUNT
) {
816 atomic_inc(&vdisk
->error_count
);
817 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC
,
818 POSTCODE_SEVERITY_INFO
);
820 atomic_set(&vdisk
->ios_threshold
,
821 IOS_ERROR_THRESHOLD
);
824 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK
, scsicmd
->device
);
828 virthba_bus_reset_handler(struct scsi_cmnd
*scsicmd
)
830 /* issue TASK_MGMT_TARGET_RESET for each target on the bus */
831 struct scsi_device
*scsidev
;
832 struct virtdisk_info
*vdisk
;
834 scsidev
= scsicmd
->device
;
835 for (vdisk
= &((struct virthba_info
*) scsidev
->host
->hostdata
)->head
;
836 vdisk
->next
; vdisk
= vdisk
->next
) {
837 if ((scsidev
->channel
== vdisk
->channel
)
838 && (scsidev
->id
== vdisk
->id
)
839 && (scsidev
->lun
== vdisk
->lun
)) {
840 if (atomic_read(&vdisk
->error_count
) <
841 VIRTHBA_ERROR_COUNT
) {
842 atomic_inc(&vdisk
->error_count
);
843 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC
,
844 POSTCODE_SEVERITY_INFO
);
846 atomic_set(&vdisk
->ios_threshold
,
847 IOS_ERROR_THRESHOLD
);
850 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET
, scsicmd
->device
);
854 virthba_device_reset_handler(struct scsi_cmnd
*scsicmd
)
856 /* issue TASK_MGMT_LUN_RESET */
857 struct scsi_device
*scsidev
;
858 struct virtdisk_info
*vdisk
;
860 scsidev
= scsicmd
->device
;
861 for (vdisk
= &((struct virthba_info
*) scsidev
->host
->hostdata
)->head
;
862 vdisk
->next
; vdisk
= vdisk
->next
) {
863 if ((scsidev
->channel
== vdisk
->channel
)
864 && (scsidev
->id
== vdisk
->id
)
865 && (scsidev
->lun
== vdisk
->lun
)) {
866 if (atomic_read(&vdisk
->error_count
) <
867 VIRTHBA_ERROR_COUNT
) {
868 atomic_inc(&vdisk
->error_count
);
869 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC
,
870 POSTCODE_SEVERITY_INFO
);
872 atomic_set(&vdisk
->ios_threshold
,
873 IOS_ERROR_THRESHOLD
);
876 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET
, scsicmd
->device
);
880 virthba_host_reset_handler(struct scsi_cmnd
*scsicmd
)
882 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
883 LOGERR("virthba_host_reset_handler Not yet implemented\n");
887 static char virthba_get_info_str
[256];
890 virthba_get_info(struct Scsi_Host
*shp
)
892 /* Return version string */
893 sprintf(virthba_get_info_str
, "virthba, version %s\n", VIRTHBA_VERSION
);
894 return virthba_get_info_str
;
898 virthba_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
900 DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd
);
904 /* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
908 virthba_queue_command_lck(struct scsi_cmnd
*scsicmd
,
909 void (*virthba_cmnd_done
)(struct scsi_cmnd
*))
911 struct scsi_device
*scsidev
= scsicmd
->device
;
914 unsigned char *cdb
= scsicmd
->cmnd
;
915 struct Scsi_Host
*scsihost
= scsidev
->host
;
916 struct uiscmdrsp
*cmdrsp
;
918 struct virthba_info
*virthbainfo
=
919 (struct virthba_info
*) scsihost
->hostdata
;
920 struct scatterlist
*sg
= NULL
;
921 struct scatterlist
*sgl
= NULL
;
924 if (virthbainfo
->serverdown
|| virthbainfo
->serverchangingstate
) {
925 DBGINF("Server is down/changing state. Returning SCSI_MLQUEUE_DEVICE_BUSY.\n");
926 return SCSI_MLQUEUE_DEVICE_BUSY
;
929 cmdrsp
= kzalloc(SIZEOF_CMDRSP
, GFP_ATOMIC
);
930 if (cmdrsp
== NULL
) {
931 LOGERR("kmalloc of cmdrsp failed.\n");
932 return 1; /* reject the command */
935 /* now saving everything we need from scsi_cmd into cmdrsp
936 * before we queue cmdrsp set type to command - as opposed to
939 cmdrsp
->cmdtype
= CMD_SCSI_TYPE
;
940 /* save the pending insertion location. Deletion from pending
941 * will return the scsicmd pointer for completion
944 add_scsipending_entry(virthbainfo
, CMD_SCSI_TYPE
, (void *) scsicmd
);
945 if (insert_location
!= -1) {
946 cmdrsp
->scsi
.scsicmd
= (void *) (uintptr_t) insert_location
;
948 LOGERR("Queue is full. Returning busy.\n");
950 return SCSI_MLQUEUE_DEVICE_BUSY
;
952 /* save done function that we have call when cmd is complete */
953 scsicmd
->scsi_done
= virthba_cmnd_done
;
954 /* save destination */
955 cmdrsp
->scsi
.vdest
.channel
= scsidev
->channel
;
956 cmdrsp
->scsi
.vdest
.id
= scsidev
->id
;
957 cmdrsp
->scsi
.vdest
.lun
= scsidev
->lun
;
959 cmdrsp
->scsi
.data_dir
= scsicmd
->sc_data_direction
;
960 memcpy(cmdrsp
->scsi
.cmnd
, cdb
, MAX_CMND_SIZE
);
962 cmdrsp
->scsi
.bufflen
= scsi_bufflen(scsicmd
);
964 /* keep track of the max buffer length so far. */
965 if (cmdrsp
->scsi
.bufflen
> MaxBuffLen
)
966 MaxBuffLen
= cmdrsp
->scsi
.bufflen
;
968 if (scsi_sg_count(scsicmd
) > MAX_PHYS_INFO
) {
969 LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
970 scsi_sg_count(scsicmd
), MAX_PHYS_INFO
);
971 del_scsipending_entry(virthbainfo
, (uintptr_t) insert_location
);
973 return 1; /* reject the command */
976 /* This is what we USED to do when we assumed we were running */
977 /* uissd & virthba on the same Linux system. */
978 /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
979 /* The following code does NOT make that assumption. */
980 /* convert buffer to phys information */
981 if (scsi_sg_count(scsicmd
) == 0) {
982 if (scsi_bufflen(scsicmd
) > 0) {
983 LOGERR("**** FAILED No scatter list for bufflen > 0\n");
984 BUG_ON(scsi_sg_count(scsicmd
) == 0);
986 DBGINF("No sg; buffer:0x%p bufflen:%d\n",
987 scsi_sglist(scsicmd
), scsi_bufflen(scsicmd
));
989 /* buffer is scatterlist - copy it out */
990 sgl
= scsi_sglist(scsicmd
);
992 for_each_sg(sgl
, sg
, scsi_sg_count(scsicmd
), i
) {
994 cmdrsp
->scsi
.gpi_list
[i
].address
= sg_phys(sg
);
995 cmdrsp
->scsi
.gpi_list
[i
].length
= sg
->length
;
996 if ((i
!= 0) && (sg
->offset
!= 0))
997 LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
1002 LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
1003 scsi_sg_count(scsicmd
), cmdrsp
->scsi
.bufflen
);
1004 for_each_sg(sgl
, sg
, scsi_sg_count(scsicmd
), i
) {
1005 LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
1007 (unsigned long long) sg_phys(sg
),
1008 sg
->offset
, sg
->length
);
1010 LOGERR("Done sg_list dump.\n");
1011 /* BUG(); ***** For now, let it fail in uissd
1012 * if it is a problem, as it might just
1017 cmdrsp
->scsi
.guest_phys_entries
= scsi_sg_count(scsicmd
);
1021 i
= uisqueue_put_cmdrsp_with_lock_client(virthbainfo
->chinfo
.queueinfo
,
1022 cmdrsp
, IOCHAN_TO_IOPART
,
1023 &virthbainfo
->chinfo
.
1025 DONT_ISSUE_INTERRUPT
,
1026 (u64
) NULL
, DONT_WAIT
, "vhba");
1028 /* queue must be full - and we said don't wait - return busy */
1029 LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
1031 del_scsipending_entry(virthbainfo
, (uintptr_t) insert_location
);
1032 return SCSI_MLQUEUE_DEVICE_BUSY
;
1035 /* we're done with cmdrsp space - data from it has been copied
1036 * into channel - free it now.
1039 return 0; /* non-zero implies host/device is busy */
1043 virthba_slave_alloc(struct scsi_device
*scsidev
)
1045 /* this called by the midlayer before scan for new devices -
1046 * LLD can alloc any struct & do init if needed.
1048 struct virtdisk_info
*vdisk
;
1049 struct virtdisk_info
*tmpvdisk
;
1050 struct virthba_info
*virthbainfo
;
1051 struct Scsi_Host
*scsihost
= (struct Scsi_Host
*) scsidev
->host
;
1053 virthbainfo
= (struct virthba_info
*) scsihost
->hostdata
;
1055 LOGERR("Could not find virthba_info for scsihost\n");
1056 return 0; /* even though we errored, treat as success */
1058 for (vdisk
= &virthbainfo
->head
; vdisk
->next
; vdisk
= vdisk
->next
) {
1059 if (vdisk
->next
->valid
&&
1060 (vdisk
->next
->channel
== scsidev
->channel
) &&
1061 (vdisk
->next
->id
== scsidev
->id
) &&
1062 (vdisk
->next
->lun
== scsidev
->lun
))
1065 tmpvdisk
= kzalloc(sizeof(struct virtdisk_info
), GFP_ATOMIC
);
1066 if (!tmpvdisk
) { /* error allocating */
1067 LOGERR("Could not allocate memory for disk\n");
1071 tmpvdisk
->channel
= scsidev
->channel
;
1072 tmpvdisk
->id
= scsidev
->id
;
1073 tmpvdisk
->lun
= scsidev
->lun
;
1074 tmpvdisk
->valid
= 1;
1075 vdisk
->next
= tmpvdisk
;
1076 return 0; /* success */
static int
virthba_slave_configure(struct scsi_device *scsidev)
{
	/* No per-device configuration needed for this virtual HBA. */
	return 0;		/* success */
}
1086 virthba_slave_destroy(struct scsi_device
*scsidev
)
1088 /* midlevel calls this after device has been quiesced and
1089 * before it is to be deleted.
1091 struct virtdisk_info
*vdisk
, *delvdisk
;
1092 struct virthba_info
*virthbainfo
;
1093 struct Scsi_Host
*scsihost
= (struct Scsi_Host
*) scsidev
->host
;
1095 virthbainfo
= (struct virthba_info
*) scsihost
->hostdata
;
1097 LOGERR("Could not find virthba_info for scsihost\n");
1098 for (vdisk
= &virthbainfo
->head
; vdisk
->next
; vdisk
= vdisk
->next
) {
1099 if (vdisk
->next
->valid
&&
1100 (vdisk
->next
->channel
== scsidev
->channel
) &&
1101 (vdisk
->next
->id
== scsidev
->id
) &&
1102 (vdisk
->next
->lun
== scsidev
->lun
)) {
1103 delvdisk
= vdisk
->next
;
1104 vdisk
->next
= vdisk
->next
->next
;
1111 /*****************************************************/
1112 /* Scsi Cmnd support thread */
1113 /*****************************************************/
1116 do_scsi_linuxstat(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
1118 struct virtdisk_info
*vdisk
;
1119 struct scsi_device
*scsidev
;
1120 struct sense_data
*sd
;
1122 scsidev
= scsicmd
->device
;
1123 memcpy(scsicmd
->sense_buffer
, cmdrsp
->scsi
.sensebuf
, MAX_SENSE_SIZE
);
1124 sd
= (struct sense_data
*) scsicmd
->sense_buffer
;
1126 /* Do not log errors for disk-not-present inquiries */
1127 if ((cmdrsp
->scsi
.cmnd
[0] == INQUIRY
) &&
1128 (host_byte(cmdrsp
->scsi
.linuxstat
) == DID_NO_CONNECT
) &&
1129 (cmdrsp
->scsi
.addlstat
== ADDL_SEL_TIMEOUT
))
1132 /* Okay see what our error_count is here.... */
1133 for (vdisk
= &((struct virthba_info
*) scsidev
->host
->hostdata
)->head
;
1134 vdisk
->next
; vdisk
= vdisk
->next
) {
1135 if ((scsidev
->channel
!= vdisk
->channel
)
1136 || (scsidev
->id
!= vdisk
->id
)
1137 || (scsidev
->lun
!= vdisk
->lun
))
1140 if (atomic_read(&vdisk
->error_count
) < VIRTHBA_ERROR_COUNT
) {
1141 atomic_inc(&vdisk
->error_count
);
1142 LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%llu> 0x%x-0x%x-0x%x-0x%x-0x%x.\n",
1143 scsicmd
, cmdrsp
->scsi
.cmnd
[0],
1144 scsidev
->host
->host_no
, scsidev
->id
,
1145 scsidev
->channel
, scsidev
->lun
,
1146 cmdrsp
->scsi
.linuxstat
, sd
->valid
, sd
->sense_key
,
1147 sd
->additional_sense_code
,
1148 sd
->additional_sense_code_qualifier
);
1149 if (atomic_read(&vdisk
->error_count
) ==
1150 VIRTHBA_ERROR_COUNT
) {
1151 LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%llu>\n",
1152 scsidev
->host
->host_no
, scsidev
->id
,
1153 scsidev
->channel
, scsidev
->lun
);
1155 atomic_set(&vdisk
->ios_threshold
, IOS_ERROR_THRESHOLD
);
1161 do_scsi_nolinuxstat(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
1163 struct scsi_device
*scsidev
;
1164 unsigned char buf
[36];
1165 struct scatterlist
*sg
;
1168 char *thispage_orig
;
1170 struct virtdisk_info
*vdisk
;
1172 scsidev
= scsicmd
->device
;
1173 if ((cmdrsp
->scsi
.cmnd
[0] == INQUIRY
)
1174 && (cmdrsp
->scsi
.bufflen
>= MIN_INQUIRY_RESULT_LEN
)) {
1175 if (cmdrsp
->scsi
.no_disk_result
== 0)
1178 /* Linux scsi code is weird; it wants
1179 * a device at Lun 0 to issue report
1180 * luns, but we don't want a disk
1181 * there so we'll present a processor
1183 SET_NO_DISK_INQUIRY_RESULT(buf
, cmdrsp
->scsi
.bufflen
,
1185 DEV_DISK_CAPABLE_NOT_PRESENT
,
1188 if (scsi_sg_count(scsicmd
) == 0) {
1189 if (scsi_bufflen(scsicmd
) > 0) {
1190 LOGERR("**** FAILED No scatter list for bufflen > 0\n");
1191 BUG_ON(scsi_sg_count(scsicmd
) ==
1194 memcpy(scsi_sglist(scsicmd
), buf
,
1195 cmdrsp
->scsi
.bufflen
);
1199 sg
= scsi_sglist(scsicmd
);
1200 for (i
= 0; i
< scsi_sg_count(scsicmd
); i
++) {
1201 DBGVER("copying OUT OF buf into 0x%p %d\n",
1202 sg_page(sg
+ i
), sg
[i
].length
);
1203 thispage_orig
= kmap_atomic(sg_page(sg
+ i
));
1204 thispage
= (void *) ((unsigned long)thispage_orig
|
1206 memcpy(thispage
, buf
+ bufind
, sg
[i
].length
);
1207 kunmap_atomic(thispage_orig
);
1208 bufind
+= sg
[i
].length
;
1212 vdisk
= &((struct virthba_info
*)scsidev
->host
->hostdata
)->head
;
1213 for ( ; vdisk
->next
; vdisk
= vdisk
->next
) {
1214 if ((scsidev
->channel
!= vdisk
->channel
)
1215 || (scsidev
->id
!= vdisk
->id
)
1216 || (scsidev
->lun
!= vdisk
->lun
))
1219 if (atomic_read(&vdisk
->ios_threshold
) > 0) {
1220 atomic_dec(&vdisk
->ios_threshold
);
1221 if (atomic_read(&vdisk
->ios_threshold
) == 0) {
1222 LOGERR("Resetting error count for disk\n");
1223 atomic_set(&vdisk
->error_count
, 0);
1231 complete_scsi_command(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
1233 DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp
, cmdrsp
->scsi
.scsistat
);
1235 /* take what we need out of cmdrsp and complete the scsicmd */
1236 scsicmd
->result
= cmdrsp
->scsi
.linuxstat
;
1237 if (cmdrsp
->scsi
.linuxstat
)
1238 do_scsi_linuxstat(cmdrsp
, scsicmd
);
1240 do_scsi_nolinuxstat(cmdrsp
, scsicmd
);
1242 if (scsicmd
->scsi_done
) {
1243 DBGVER("Scsi_DONE\n");
1244 scsicmd
->scsi_done(scsicmd
);
1249 complete_vdiskmgmt_command(struct uiscmdrsp
*cmdrsp
)
1251 /* copy the result of the taskmgmt and */
1252 /* wake up the error handler that is waiting for this */
1253 *(int *) cmdrsp
->vdiskmgmt
.notifyresult
= cmdrsp
->vdiskmgmt
.result
;
1254 wake_up_all((wait_queue_head_t
*) cmdrsp
->vdiskmgmt
.notify
);
1255 LOGINF("set notify result to %d\n", cmdrsp
->vdiskmgmt
.result
);
1259 complete_taskmgmt_command(struct uiscmdrsp
*cmdrsp
)
1261 /* copy the result of the taskmgmt and */
1262 /* wake up the error handler that is waiting for this */
1263 *(int *) cmdrsp
->scsitaskmgmt
.notifyresult
=
1264 cmdrsp
->scsitaskmgmt
.result
;
1265 wake_up_all((wait_queue_head_t
*) cmdrsp
->scsitaskmgmt
.notify
);
1266 LOGINF("set notify result to %d\n", cmdrsp
->scsitaskmgmt
.result
);
1270 drain_queue(struct virthba_info
*virthbainfo
, struct chaninfo
*dc
,
1271 struct uiscmdrsp
*cmdrsp
)
1273 unsigned long flags
;
1275 struct scsi_cmnd
*scsicmd
;
1276 struct Scsi_Host
*shost
= virthbainfo
->scsihost
;
1279 spin_lock_irqsave(&virthbainfo
->chinfo
.insertlock
, flags
);
1280 if (!spar_channel_client_acquire_os(dc
->queueinfo
->chan
,
1282 spin_unlock_irqrestore(&virthbainfo
->chinfo
.insertlock
,
1284 virthbainfo
->acquire_failed_cnt
++;
1287 qrslt
= uisqueue_get_cmdrsp(dc
->queueinfo
, cmdrsp
,
1288 IOCHAN_FROM_IOPART
);
1289 spar_channel_client_release_os(dc
->queueinfo
->chan
, "vhba");
1290 spin_unlock_irqrestore(&virthbainfo
->chinfo
.insertlock
, flags
);
1293 if (cmdrsp
->cmdtype
== CMD_SCSI_TYPE
) {
1294 /* scsicmd location is returned by the
1297 scsicmd
= del_scsipending_entry(virthbainfo
,
1298 (uintptr_t) cmdrsp
->scsi
.scsicmd
);
1301 /* complete the orig cmd */
1302 complete_scsi_command(cmdrsp
, scsicmd
);
1303 } else if (cmdrsp
->cmdtype
== CMD_SCSITASKMGMT_TYPE
) {
1304 if (!del_scsipending_entry(virthbainfo
,
1305 (uintptr_t) cmdrsp
->scsitaskmgmt
.scsicmd
))
1307 complete_taskmgmt_command(cmdrsp
);
1308 } else if (cmdrsp
->cmdtype
== CMD_NOTIFYGUEST_TYPE
) {
1309 /* The vHba pointer has no meaning in
1310 * a Client/Guest Partition. Let's be
1311 * safe and set it to NULL now. Do
1312 * not use it here! */
1313 cmdrsp
->disknotify
.v_hba
= NULL
;
1314 process_disk_notify(shost
, cmdrsp
);
1315 } else if (cmdrsp
->cmdtype
== CMD_VDISKMGMT_TYPE
) {
1316 if (!del_scsipending_entry(virthbainfo
,
1317 (uintptr_t) cmdrsp
->vdiskmgmt
.scsicmd
))
1319 complete_vdiskmgmt_command(cmdrsp
);
1321 LOGERR("Invalid cmdtype %d\n", cmdrsp
->cmdtype
);
1322 /* cmdrsp is now available for reuse */
1327 /* main function for the thread that waits for scsi commands to arrive
1328 * in a specified queue
1331 process_incoming_rsps(void *v
)
1333 struct virthba_info
*virthbainfo
= v
;
1334 struct chaninfo
*dc
= &virthbainfo
->chinfo
;
1335 struct uiscmdrsp
*cmdrsp
= NULL
;
1336 const int SZ
= sizeof(struct uiscmdrsp
);
1338 unsigned long long rc1
;
1340 UIS_DAEMONIZE("vhba_incoming");
1341 /* alloc once and reuse */
1342 cmdrsp
= kmalloc(SZ
, GFP_ATOMIC
);
1343 if (cmdrsp
== NULL
) {
1344 LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n");
1345 complete_and_exit(&dc
->threadinfo
.has_stopped
, 0);
1348 mask
= ULTRA_CHANNEL_ENABLE_INTS
;
1350 wait_event_interruptible_timeout(virthbainfo
->rsp_queue
,
1351 (atomic_read(&virthbainfo
->interrupt_rcvd
) == 1),
1352 usecs_to_jiffies(rsltq_wait_usecs
));
1353 atomic_set(&virthbainfo
->interrupt_rcvd
, 0);
1355 drain_queue(virthbainfo
, dc
, cmdrsp
);
1356 rc1
= uisqueue_interlocked_or(virthbainfo
->flags_addr
, mask
);
1357 if (dc
->threadinfo
.should_stop
)
1363 DBGINF("exiting processing incoming rsps.\n");
1364 complete_and_exit(&dc
->threadinfo
.has_stopped
, 0);
1367 /*****************************************************/
1368 /* Debugfs filesystem functions */
1369 /*****************************************************/
1371 static ssize_t
info_debugfs_read(struct file
*file
,
1372 char __user
*buf
, size_t len
, loff_t
*offset
)
1374 ssize_t bytes_read
= 0;
1376 u64 phys_flags_addr
;
1378 struct virthba_info
*virthbainfo
;
1383 vbuf
= kzalloc(len
, GFP_KERNEL
);
1387 for (i
= 0; i
< VIRTHBASOPENMAX
; i
++) {
1388 if (VirtHbasOpen
[i
].virthbainfo
== NULL
)
1391 virthbainfo
= VirtHbasOpen
[i
].virthbainfo
;
1393 str_pos
+= scnprintf(vbuf
+ str_pos
,
1394 len
- str_pos
, "MaxBuffLen:%u\n", MaxBuffLen
);
1396 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
,
1397 "\nvirthba result queue poll wait:%d usecs.\n",
1399 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
,
1400 "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
1401 virthbainfo
->interrupts_rcvd
,
1402 virthbainfo
->interrupts_disabled
);
1403 str_pos
+= scnprintf(vbuf
+ str_pos
,
1404 len
- str_pos
, "\ninterrupts_notme = %llu,\n",
1405 virthbainfo
->interrupts_notme
);
1406 phys_flags_addr
= virt_to_phys((__force
void *)
1407 virthbainfo
->flags_addr
);
1408 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
,
1409 "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
1410 virthbainfo
->flags_addr
, phys_flags_addr
,
1411 (__le64
)readq(virthbainfo
->flags_addr
));
1412 str_pos
+= scnprintf(vbuf
+ str_pos
,
1413 len
- str_pos
, "acquire_failed_cnt:%llu\n",
1414 virthbainfo
->acquire_failed_cnt
);
1415 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
, "\n");
1418 bytes_read
= simple_read_from_buffer(buf
, len
, offset
, vbuf
, str_pos
);
1423 static ssize_t
enable_ints_write(struct file
*file
,
1424 const char __user
*buffer
, size_t count
, loff_t
*ppos
)
1428 struct virthba_info
*virthbainfo
;
1430 u64 __iomem
*Features_addr
;
1433 if (count
>= ARRAY_SIZE(buf
))
1437 if (copy_from_user(buf
, buffer
, count
)) {
1438 LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
1439 (int) count
, buf
, count
);
1443 i
= kstrtoint(buf
, 10 , &new_value
);
1446 LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
1451 /* set all counts to new_value usually 0 */
1452 for (i
= 0; i
< VIRTHBASOPENMAX
; i
++) {
1453 if (VirtHbasOpen
[i
].virthbainfo
!= NULL
) {
1454 virthbainfo
= VirtHbasOpen
[i
].virthbainfo
;
1456 &virthbainfo
->chinfo
.queueinfo
->chan
->features
;
1457 if (new_value
== 1) {
1458 mask
= ~(ULTRA_IO_CHANNEL_IS_POLLING
|
1459 ULTRA_IO_DRIVER_DISABLES_INTS
);
1460 uisqueue_interlocked_and(Features_addr
, mask
);
1461 mask
= ULTRA_IO_DRIVER_ENABLES_INTS
;
1462 uisqueue_interlocked_or(Features_addr
, mask
);
1463 rsltq_wait_usecs
= 4000000;
1465 mask
= ~(ULTRA_IO_DRIVER_ENABLES_INTS
|
1466 ULTRA_IO_DRIVER_DISABLES_INTS
);
1467 uisqueue_interlocked_and(Features_addr
, mask
);
1468 mask
= ULTRA_IO_CHANNEL_IS_POLLING
;
1469 uisqueue_interlocked_or(Features_addr
, mask
);
1470 rsltq_wait_usecs
= 4000;
1477 /* As per VirtpciFunc returns 1 for success and 0 for failure */
1479 virthba_serverup(struct virtpci_dev
*virtpcidev
)
1481 struct virthba_info
*virthbainfo
=
1482 (struct virthba_info
*) ((struct Scsi_Host
*) virtpcidev
->scsi
.
1483 scsihost
)->hostdata
;
1485 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
1486 virtpcidev
->device_no
);
1488 if (!virthbainfo
->serverdown
) {
1489 DBGINF("Server up message received while server is already up.\n");
1492 if (virthbainfo
->serverchangingstate
) {
1493 LOGERR("Server already processing change state message\n");
1497 virthbainfo
->serverchangingstate
= true;
1498 /* Must transition channel to ATTACHED state BEFORE we
1499 * can start using the device again
1501 SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo
->chinfo
.queueinfo
->chan
,
1502 dev_name(&virtpcidev
->generic_dev
),
1503 CHANNELCLI_ATTACHED
, NULL
);
1505 /* Start Processing the IOVM Response Queue Again */
1506 if (!uisthread_start(&virthbainfo
->chinfo
.threadinfo
,
1507 process_incoming_rsps
,
1508 virthbainfo
, "vhba_incoming")) {
1509 LOGERR("uisthread_start rsp ****FAILED\n");
1512 virthbainfo
->serverdown
= false;
1513 virthbainfo
->serverchangingstate
= false;
1519 virthba_serverdown_complete(struct work_struct
*work
)
1521 struct virthba_info
*virthbainfo
;
1522 struct virtpci_dev
*virtpcidev
;
1524 struct scsipending
*pendingdel
= NULL
;
1525 struct scsi_cmnd
*scsicmd
= NULL
;
1526 struct uiscmdrsp
*cmdrsp
;
1527 unsigned long flags
;
1529 virthbainfo
= container_of(work
, struct virthba_info
,
1530 serverdown_completion
);
1532 /* Stop Using the IOVM Response Queue (queue should be drained
1535 uisthread_stop(&virthbainfo
->chinfo
.threadinfo
);
1537 /* Fail Commands that weren't completed */
1538 spin_lock_irqsave(&virthbainfo
->privlock
, flags
);
1539 for (i
= 0; i
< MAX_PENDING_REQUESTS
; i
++) {
1540 pendingdel
= &(virthbainfo
->pending
[i
]);
1541 switch (pendingdel
->cmdtype
) {
1543 scsicmd
= (struct scsi_cmnd
*) pendingdel
->sent
;
1544 scsicmd
->result
= (DID_RESET
<< 16);
1545 if (scsicmd
->scsi_done
)
1546 scsicmd
->scsi_done(scsicmd
);
1548 case CMD_SCSITASKMGMT_TYPE
:
1549 cmdrsp
= (struct uiscmdrsp
*) pendingdel
->sent
;
1550 DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp
,
1551 cmdrsp
->scsitaskmgmt
.notify
);
1552 *(int *) cmdrsp
->scsitaskmgmt
.notifyresult
=
1554 wake_up_all((wait_queue_head_t
*)
1555 cmdrsp
->scsitaskmgmt
.notify
);
1557 case CMD_VDISKMGMT_TYPE
:
1558 cmdrsp
= (struct uiscmdrsp
*) pendingdel
->sent
;
1559 *(int *) cmdrsp
->vdiskmgmt
.notifyresult
=
1561 wake_up_all((wait_queue_head_t
*)
1562 cmdrsp
->vdiskmgmt
.notify
);
1565 if (pendingdel
->sent
!= NULL
)
1566 LOGERR("Unknown command type: 0x%x. Only freeing list structure.\n",
1567 pendingdel
->cmdtype
);
1569 pendingdel
->cmdtype
= 0;
1570 pendingdel
->sent
= NULL
;
1572 spin_unlock_irqrestore(&virthbainfo
->privlock
, flags
);
1574 virtpcidev
= virthbainfo
->virtpcidev
;
1576 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
1577 virtpcidev
->device_no
);
1578 virthbainfo
->serverdown
= true;
1579 virthbainfo
->serverchangingstate
= false;
1580 /* Return the ServerDown response to Command */
1581 visorchipset_device_pause_response(virtpcidev
->bus_no
,
1582 virtpcidev
->device_no
, 0);
1585 /* As per VirtpciFunc returns 1 for success and 0 for failure */
1587 virthba_serverdown(struct virtpci_dev
*virtpcidev
, u32 state
)
1589 struct virthba_info
*virthbainfo
=
1590 (struct virthba_info
*) ((struct Scsi_Host
*) virtpcidev
->scsi
.
1591 scsihost
)->hostdata
;
1593 DBGINF("virthba_serverdown");
1594 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev
->bus_no
,
1595 virtpcidev
->device_no
);
1597 if (!virthbainfo
->serverdown
&& !virthbainfo
->serverchangingstate
) {
1598 virthbainfo
->serverchangingstate
= true;
1599 queue_work(virthba_serverdown_workqueue
,
1600 &virthbainfo
->serverdown_completion
);
1601 } else if (virthbainfo
->serverchangingstate
) {
1602 LOGERR("Server already processing change state message\n");
1605 LOGERR("Server already down, but another server down message received.");
1610 /*****************************************************/
1611 /* Module Init & Exit functions */
1612 /*****************************************************/
static int
virthba_parse_line(char *str)
{
	/* Stub option parser: logs the option and accepts it.  Returns
	 * nonzero so virthba_parse_options() treats the line as known.
	 */
	DBGINF("In virthba_parse_line %s\n", str);
	return 1;
}
1622 virthba_parse_options(char *line
)
1626 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
1627 if (line
== NULL
|| !*line
)
1629 while ((line
= next
) != NULL
) {
1630 next
= strchr(line
, ' ');
1633 if (!virthba_parse_line(line
))
1634 DBGINF("Unknown option '%s'\n", line
);
1637 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
1641 virthba_mod_init(void)
1646 if (!unisys_spar_platform
)
1649 LOGINF("Entering virthba_mod_init...\n");
1651 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
1652 virthba_parse_options(virthba_options
);
1654 error
= virtpci_register_driver(&virthba_driver
);
1656 LOGERR("register ****FAILED 0x%x\n", error
);
1657 POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC
, error
,
1658 POSTCODE_SEVERITY_ERR
);
1661 /* create the debugfs directories and entries */
1662 virthba_debugfs_dir
= debugfs_create_dir("virthba", NULL
);
1663 debugfs_create_file("info", S_IRUSR
, virthba_debugfs_dir
,
1664 NULL
, &debugfs_info_fops
);
1665 debugfs_create_u32("rqwait_usecs", S_IRUSR
| S_IWUSR
,
1666 virthba_debugfs_dir
, &rsltq_wait_usecs
);
1667 debugfs_create_file("enable_ints", S_IWUSR
,
1668 virthba_debugfs_dir
, NULL
,
1669 &debugfs_enable_ints_fops
);
1670 /* Initialize DARWorkQ */
1671 INIT_WORK(&DARWorkQ
, doDiskAddRemove
);
1672 spin_lock_init(&DARWorkQLock
);
1674 /* clear out array */
1675 for (i
= 0; i
< VIRTHBASOPENMAX
; i
++)
1676 VirtHbasOpen
[i
].virthbainfo
= NULL
;
1677 /* Initialize the serverdown workqueue */
1678 virthba_serverdown_workqueue
=
1679 create_singlethread_workqueue("virthba_serverdown");
1680 if (virthba_serverdown_workqueue
== NULL
) {
1681 LOGERR("**** FAILED virthba_serverdown_workqueue creation\n");
1682 POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC
,
1683 POSTCODE_SEVERITY_ERR
);
1688 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
1689 LOGINF("Leaving virthba_mod_init\n");
1694 virthba_acquire_lun(struct device
*cdev
, struct device_attribute
*attr
,
1695 const char *buf
, size_t count
)
1697 struct uisscsi_dest vdest
;
1698 struct Scsi_Host
*shost
= class_to_shost(cdev
);
1701 i
= sscanf(buf
, "%d-%d-%d", &vdest
.channel
, &vdest
.id
, &vdest
.lun
);
1705 return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE
, shost
, &vdest
);
1709 virthba_release_lun(struct device
*cdev
, struct device_attribute
*attr
,
1710 const char *buf
, size_t count
)
1712 struct uisscsi_dest vdest
;
1713 struct Scsi_Host
*shost
= class_to_shost(cdev
);
1716 i
= sscanf(buf
, "%d-%d-%d", &vdest
.channel
, &vdest
.id
, &vdest
.lun
);
1720 return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE
, shost
, &vdest
);
1723 #define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \
1724 struct device_attribute class_device_attr_##_name = \
1725 __ATTR(_name, _mode, _show, _store)
1727 static CLASS_DEVICE_ATTR(acquire_lun
, S_IWUSR
, NULL
, virthba_acquire_lun
);
1728 static CLASS_DEVICE_ATTR(release_lun
, S_IWUSR
, NULL
, virthba_release_lun
);
1730 static DEVICE_ATTRIBUTE
*virthba_shost_attrs
[] = {
1731 &class_device_attr_acquire_lun
,
1732 &class_device_attr_release_lun
,
1737 virthba_mod_exit(void)
1739 LOGINF("entering virthba_mod_exit...\n");
1741 virtpci_unregister_driver(&virthba_driver
);
1742 /* unregister is going to call virthba_remove */
1743 /* destroy serverdown completion workqueue */
1744 if (virthba_serverdown_workqueue
) {
1745 destroy_workqueue(virthba_serverdown_workqueue
);
1746 virthba_serverdown_workqueue
= NULL
;
1749 debugfs_remove_recursive(virthba_debugfs_dir
);
1750 LOGINF("Leaving virthba_mod_exit\n");
1754 /* specify function to be run at module insertion time */
1755 module_init(virthba_mod_init
);
1757 /* specify function to be run when module is removed */
1758 module_exit(virthba_mod_exit
);
1760 MODULE_LICENSE("GPL");
1761 MODULE_AUTHOR("Usha Srinivasan");
1762 MODULE_ALIAS("uisvirthba");
1763 /* this is extracted during depmod and kept in modules.dep */
1764 /* module parameter */
1765 module_param(virthba_options
, charp
, S_IRUGO
);