/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3250103C, "Smart Array", &SA5_access},
	{0x3250113C, "Smart Array", &SA5_access},
	{0x3250123C, "Smart Array", &SA5_access},
	{0x3250133C, "Smart Array", &SA5_access},
	{0x3250143C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	NULL,
};
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= "hpsa",
	.proc_name		= "hpsa",
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
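/* Note on the sense decoding above: SenseInfo[2] is the sense key byte
 * of fixed-format sense data and SenseInfo[12] is the additional sense
 * code (ASC); the case labels (STATE_CHANGED, REPORT_LUNS_CHANGED,
 * UNIT_ATTENTION_CLEARED, etc.) are ASC values defined in hpsa_cmd.h.
 */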
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}
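/* Explanatory sketch of the reply ring above: each slot holds a
 * completed command's tag with a "cycle" flag in bit 0.  The controller
 * alternates the polarity of that flag on each pass over the ring, and
 * reply_pool_wraparound records which polarity currently means "new";
 * toggling it when the head pointer wraps keeps the consumer in step
 * with the producer without any locking on the consumer side.
 */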
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod == CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
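/* Worked example (illustrative values): for a command with
 * c->Header.SGList == 5 and h->blockFetchTable[5] == 2, the expression
 * above ORs in 1 | (2 << 1) == 0x5, i.e. bit 0 selects the pull model
 * and bits 3-1 ask the controller to fetch two blocks of the command.
 */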
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}
static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
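/* Note: byte 3 of the 8-byte CISS LUN address carries the address mode
 * in its top two bits; 0x40 is the logical-volume mode, which is why
 * masked physical devices (0xC0 in those bits) are filtered separately
 * in hpsa_update_scsi_devices().
 */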
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!test_bit(i, lun_taken)) {
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}
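/* Illustration of the byte-4 matching above: a new LUN whose 8-byte
 * address differs from a known device's address only in byte 4 (the
 * logical unit number) is treated as another lun of the same multi-lun
 * device, so it inherits that device's bus and target assignment.
 */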
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_scsi_setup(struct ctlr_info *h)
{
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
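/* SG chaining layout (worked example, illustrative numbers): with
 * h->max_cmd_sg_entries == 32 and a request mapping to 40 elements,
 * elements 0-30 stay inline in c->SG[], c->SG[31] is rewritten above
 * into a chain descriptor (Ext == HPSA_SG_CHAIN) pointing at the
 * per-command chain block holding the remaining 9 elements; the
 * descriptor itself accounts for the "+ 1" in Header.SGTotal set by
 * hpsa_scatter_gather().
 */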
static void complete_scsi_command(struct CommandList *cp,
	int timeout, u32 tag)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	memcpy(cmd->sense_buffer, ei->SenseInfo,
		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
			SCSI_SENSE_BUFFERSIZE :
			ei->SenseLen);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
				cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_RESET << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}
static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[PERF_MODE_INT];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}
static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			unsigned char page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
{
#define OBDR_TAPE_INQ_SIZE 49
	unsigned char *inq_buff;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}
static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		if (unlikely(is_scsi_rev_5(h))) {
			/* p1210m, logical drives lun assignments
			 * match SCSI REPORT LUNS data.
			 */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			*bus = 0;
			*target = 0;
			*lun = (lunid & 0x3fff) + 1;
		} else {
			/* not p1210m... */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			if (is_msa2xxx(h, device)) {
				/* msa2xxx way, put logicals on bus 1
				 * and match target/lun numbers box
				 * reports.
				 */
				*bus = 1;
				*target = (lunid >> 16) & 0x3fff;
				*lun = lunid & 0x00ff;
			} else {
				/* Traditional smart array way. */
				*bus = 0;
				*lun = 0;
				*target = lunid & 0x3fff;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}
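/* Decoding example (illustrative values): an MSA2xxx logical volume
 * whose lunid is 0x00050001 after le32_to_cpu lands on bus 1 with
 * target == (0x00050001 >> 16) & 0x3fff == 5 and lun == 0x01; the same
 * lunid on a traditional Smart Array yields bus 0, target 1, lun 0.
 */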
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	int bus, int target, int lun, unsigned long lunzerobits[],
	int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

#define MAX_MSA2XXX_ENCLOSURES 32
	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
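/* Note on the arithmetic above: LUNListLength is a big-endian byte
 * count supplied by the controller, and each LUN entry is 8 bytes, so
 * byte-swapping and dividing by 8 yields the number of entries.
 */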
u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
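/* Index layout (explanatory): on SCSI rev 5 controllers the RAID
 * controller itself occupies position 0 and shifts both lists down by
 * one; otherwise it appears after nphysicals + nlogicals.  That is the
 * purpose of the (raid_ctlr_position == 0) corrections above.
 */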
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	unsigned char *inq_buff = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
	int bus, target, lun;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!inq_buff || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
	 * but each of them 4 times through different paths.  The plus 1
	 * is for the RAID controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	nmsa2xxx_enclosures = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
			tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For the msa2xxx boxes, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
				lunaddrbytes, bus, target, lun, lunzerobits,
				&nmsa2xxx_enclosures)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		hpsa_set_bus_target_lun(this_device, bus, target, lun);

		switch (this_device->devtype) {
		case TYPE_ROM: {
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
				char obdr_sig[7];
#define OBDR_TAPE_SIG "$DR-10"
				strncpy(obdr_sig, &inq_buff[43], 6);
				obdr_sig[6] = '\0';
				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
					/* Not OBDR device, ignore it. */
					break;
			}
			ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals)
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(inq_buff);
	kfree(physdev_list);
	kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
		curr_sg->Ext = 0;  /* we are not chaining */
		curr_sg++;
	}

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = (u16) (use_sg + 1);
		hpsa_map_sg_chain_block(h, cp);
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
	return 0;
}
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	/* Need a lock as this is being allocated from the pool */
	spin_lock_irqsave(&h->lock, flags);
	c = cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done;    /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
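	/* Tag note: encoding cmdindex in the tag with DIRECT_LOOKUP_BIT
	 * set lets the completion path map a finished tag straight back
	 * to its CommandList by index instead of searching a list of
	 * outstanding commands.
	 */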

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
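/* DEF_SCSI_QCMD (scsi/scsi_cmnd.h) wraps the _lck variant above with
 * the host_lock handling the SCSI midlayer expects, generating the
 * hpsa_scsi_queue_command entry point named in hpsa_driver_template.
 */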
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
	int rc;

	rc = hpsa_scsi_detect(h);
	if (rc != 0)
		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
			" hpsa_scsi_detect(), rc is %d\n", rc);
	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  Do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready */
		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;
	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev, "resetting device failed.\n");
	return FAILED;
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds)
			return NULL;
	} while (test_and_set_bit
		 (i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	c = h->cmd_pool + i;
	memset(c, 0, sizeof(*c));
	cmd_dma_handle = h->cmd_pool_dhandle
	    + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);
	h->nr_allocs++;

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}
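/* Bitmap bookkeeping above, by way of example (illustrative): on a
 * 64-bit machine (BITS_PER_LONG == 64), command index i == 70 maps to
 * word i / BITS_PER_LONG == 1 and bit i & (BITS_PER_LONG - 1) == 6,
 * so cmd 70 is tracked by bit 6 of the second word of cmd_pool_bits.
 */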
/* For operations that can wait for kmalloc to possibly sleep,
 * this routine can be called. Lock need not be held to call
 * cmd_special_alloc. cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));

	c->cmdindex = -1;

	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(*c), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(*c->err_info));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	int i;

	i = c - h->cmd_pool;
	clear_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG));
	h->nr_frees++;
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
	union u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(*c->err_info),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(*c),
			    c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
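/* Note: the low bits of busaddr double as tag flag bits (see the
 * DIRECT_LOOKUP_* constants), which is why they are masked off with
 * DIRECT_LOOKUP_MASK above before handing the address back to
 * pci_free_consistent().
 */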
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
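/* Version packing example (illustrative): HPSA_DRIVER_VERSION "2.0.2-1"
 * parses its leading "2.0.2" to vmaj == 2, vmin == 0, vsubmin == 2,
 * giving DriverVer == (2 << 16) | (0 << 8) | 2 == 0x020002.
 */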
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	union u64bit temp64;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -EFAULT;
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				kfree(buff);
				return -EFAULT;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		kfree(buff);
		return -ENOMEM;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
	/* use the kernel address of the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0; /* we are not chaining*/
	}
	hpsa_scsi_do_simple_cmd_core(h, c);
	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		kfree(buff);
		cmd_special_free(h, c);
		return -EFAULT;
	}
	if (iocommand.Request.Type.Direction == XFER_READ &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			kfree(buff);
			cmd_special_free(h, c);
			return -EFAULT;
		}
	}
	kfree(buff);
	cmd_special_free(h, c);
	return 0;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	union u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits  using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -ENOMEM;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {

		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			/* we are not chaining */
			c->SG[i].Ext = 0;
		}
	}
	hpsa_scsi_do_simple_cmd_core(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		return hpsa_passthru_ioctl(h, argp);
	case CCISS_BIG_PASSTHRU:
		return hpsa_big_passthru_ioctl(h, argp);
	default:
		return -ENOTTY;
	}
}
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
			BUG();
			return;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
			c->Request.CDB[1] = 0x03; /* Reset target above */
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;

		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}

	hpsa_map_one(h->pdev, c, buff, size, pci_dir);
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
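/* Example (illustrative, assuming 4K pages): remap_pci_mem(0xfbef0100,
 * 0x250) page-aligns the base to 0xfbef0000, ioremaps 0x350 bytes, and
 * returns the mapped address plus the 0x100 offset, so callers can pass
 * unaligned BAR offsets transparently.
 */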
/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;

	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller execute command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}
static inline unsigned long get_next_completion(struct ctlr_info *h)
{
	return h->access.command_completed(h);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
{
	removeQ(c);
	if (likely(c->cmd_type == CMD_SCSI))
		complete_scsi_command(c, 0, raw_tag);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
}

static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 hpsa_tag_discard_error_bits(u32 tag)
{
#define HPSA_ERROR_BITS 0x03
	return tag & ~HPSA_ERROR_BITS;
}
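/* Tag layout recap (as used by the helpers above; exact bit positions
 * come from the DIRECT_LOOKUP_* constants in hpsa.h): a direct-lookup
 * tag carries the command's pool index in its upper bits
 * (index == tag >> DIRECT_LOOKUP_SHIFT), a flag bit marking it as
 * indexed (DIRECT_LOOKUP_BIT), and two low bits the controller may set
 * to report errors, which hpsa_tag_discard_error_bits() strips.
 */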
/* process completion of an indexed ("direct lookup") command */
static inline u32 process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (bad_tag(h, tag_index, raw_tag))
		return next_command(h);
	c = h->cmd_pool + tag_index;
	finish_cmd(c, raw_tag);
	return next_command(h);
}

/* process completion of a non-indexed command */
static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;

	tag = hpsa_tag_discard_error_bits(raw_tag);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			finish_cmd(c, raw_tag);
			return next_command(h);
		}
	}
	bad_tag(h, h->nr_cmds + 1, raw_tag);
	return next_command(h);
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
{
	struct ctlr_info *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	spin_lock_irqsave(&h->lock, flags);
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h);
		while (raw_tag != FIFO_EMPTY) {
			if (hpsa_tag_contains_index(raw_tag))
				raw_tag = process_indexed_cmd(h, raw_tag);
			else
				raw_tag = process_nonindexed_cmd(h, raw_tag);
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
{
	struct ctlr_info *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	spin_lock_irqsave(&h->lock, flags);
	raw_tag = get_next_completion(h);
	while (raw_tag != FIFO_EMPTY) {
		if (hpsa_tag_contains_index(raw_tag))
			raw_tag = process_indexed_cmd(h, raw_tag);
		else
			raw_tag = process_nonindexed_cmd(h, raw_tag);
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if (hpsa_tag_discard_error_bits(tag) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
#define hpsa_noop(p) hpsa_message(p, 3, 0)
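/* As the macro names indicate, message opcode 1 is a soft controller
 * reset and opcode 3 is a no-op; hpsa_noop() is used after a hard reset
 * (see hpsa_init_reset_devices() below) to probe whether the firmware
 * is responding again.
 */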
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void * __iomem vaddr, bool use_doorbell)
{
	u16 pmcsr;
	int pos;

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
		msleep(1000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller." */

		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (pos == 0) {
			dev_err(&pdev->dev,
				"hpsa_reset_controller: "
				"PCI PM not supported\n");
			return -ENODEV;
		}
		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
		/* enter the D3hot power management state */
		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D3hot;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		msleep(500);

		/* enter the D0 power management state */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D0;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		msleep(500);
	}
	return 0;
}
/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support, active_transport;
	int rc;
	struct CfgTable __iomem *cfgtable;
	bool use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}
	if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
		return -ENOTSUPP; /* cannot reset 640x */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off.  This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}

	/* If reset via doorbell register is supported, use that. */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	/* Wait for board to become not ready, then ready. */
	dev_info(&pdev->dev, "Waiting for board to become ready.\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
	if (rc)
		dev_warn(&pdev->dev,
			"failed waiting for board to become not ready\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready\n");
		goto unmap_cfgtable;
	}
	dev_info(&pdev->dev, "board ready.\n");

	/* Controller should be in simple mode at this point.  If it's not,
	 * it means we're on one of those controllers which doesn't support
	 * the doorbell reset method and on which the PCI power management reset
	 * method doesn't work (P800, for example.)
	 * In those cases, pretend the reset worked and hope for the best.
	 */
	active_transport = readl(&cfgtable->TransportActive);
	if (active_transport & PERFORMANT_MODE) {
		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
			" proceeding anyway.\n");
		rc = -ENOTSUPP;
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = 0x%d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
		if (!err) {
			h->intr[0] = hpsa_msix_entries[0].vector;
			h->intr[1] = hpsa_msix_entries[1].vector;
			h->intr[2] = hpsa_msix_entries[2].vector;
			h->intr[3] = hpsa_msix_entries[3].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[PERF_MODE_INT] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
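/* Board-id packing example (illustrative): a controller with PCI
 * subsystem vendor 0x103C and subsystem device 0x3241 (see the
 * hpsa_pci_device_id table) yields *board_id == 0x3241103C.
 */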
static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
	u16 command;

	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save DMA'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}
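/* Illustrative numbers for the logic above: if the config table reports
 * 1216 SG elements, then max_cmd_sg_entries == 32,
 * chainsize == 1216 - 32 + 1 == 1185, and maxsgentries is trimmed to
 * 1215 to reserve one slot for the chain pointer.
 */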
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}
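/* The handshake being polled above: the driver hands the controller a
 * CFGTBL_ChangeReq via the SA5 doorbell when requesting a transport-mode
 * change, and the firmware clears that bit once it has acknowledged the
 * new mode, so the loop simply waits for the bit to drop.
 */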
static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	return 0;
}
static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (hpsa_board_disabled(h->pdev)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}
	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "hpsa");
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(h->pdev);
	return err;
}
static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return 0; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}
static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc)
		return rc;

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->busy_initializing = 1;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
				IRQF_DISABLED, h->devname, h);
	else
		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
				IRQF_DISABLED, h->devname, h);
	if (rc) {
		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[PERF_MODE_INT], h->devname);
		goto clean2;
	}

	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[PERF_MODE_INT], dac ? "" : " not");

	h->cmd_pool_bits =
	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&pdev->dev, "out of memory");
		rc = -ENOMEM;
		goto clean4;
	}
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	memset(h->cmd_pool_bits, 0,
	       ((h->nr_cmds + BITS_PER_LONG -
		 1) / BITS_PER_LONG) * sizeof(unsigned long));

	hpsa_scsi_setup(h);

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_put_ctlr_into_performant_mode(h);
	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	h->busy_initializing = 0;
	return 1;

clean4:
	hpsa_free_sg_chain_blocks(h);
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	free_irq(h->intr[PERF_MODE_INT], h);
clean2:
clean1:
	h->busy_initializing = 0;
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off  and send the flush cache command
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[PERF_MODE_INT], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif				/* CONFIG_PCI_MSI */
}
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = "hpsa",
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
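/* Worked example (illustrative): with bucket[] == {5, 6, 8, 10, 12, 20,
 * 28, MAXSGENTRIES + 4}, a command with 3 SG entries needs
 * 3 + MINIMUM_TRANSFER_BLOCKS == 7 blocks; the first bucket >= 7 is 8,
 * at index 2, so bucket_map[3] == 2.
 */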
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = h->max_sg_entries + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	h->max_sg_entries = 32;
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h);

	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);