/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x334D103C, "Smart Array P822se", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
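/* Byte 3 of the 8-byte LUN address carries the addressing mode in its
 * top two bits: 0x40 here means a logical volume (so, e.g., an address
 * with scsi3addr[3] == 0x41 is logical).  hpsa_update_scsi_devices()
 * below uses the same 0xC0 mask when it skips masked physical devices.
 */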
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
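/* The performant-mode reply queue is a ring: the controller writes each
 * completion tag with its low bit set to the current wraparound parity,
 * so an entry whose low bit matches rq->wraparound is fresh and one
 * that doesn't is stale.  Flipping rq->wraparound on every pass around
 * the ring lets the driver tell new completions from leftovers without
 * ever having to clear consumed entries.
 */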
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}
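/* Worked example of the tag encoding above: a command whose busaddr is
 * 0x100 and whose SGList count indexes blockFetchTable entry 2 ends up
 * with busaddr | 1 | (2 << 1) == 0x105 -- bit 0 selects the pull model
 * and bits 3-1 carry the block fetch register number.
 */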
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
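/* firmware_flash_in_progress is a counter rather than a flag, so
 * overlapping flash commands behave sanely: each one bumps the count,
 * and only when atomic_dec_and_test() sees the last flash finish does
 * the heartbeat sample interval drop back from 240 seconds to the
 * normal 30 seconds.
 */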
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}
static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
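/* The bitmap scan above allocates targets compactly: every target
 * already in use on this bus sets its bit in lun_taken, and
 * find_first_zero_bit() hands back the lowest free target number,
 * which becomes the new device's target (with lun 0).
 */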
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
*h
,
1158 struct CommandList
*c
)
1160 struct SGDescriptor
*chain_sg
, *chain_block
;
1163 chain_sg
= &c
->SG
[h
->max_cmd_sg_entries
- 1];
1164 chain_block
= h
->cmd_sg_list
[c
->cmdindex
];
1165 chain_sg
->Ext
= HPSA_SG_CHAIN
;
1166 chain_sg
->Len
= sizeof(*chain_sg
) *
1167 (c
->Header
.SGTotal
- h
->max_cmd_sg_entries
);
1168 temp64
= pci_map_single(h
->pdev
, chain_block
, chain_sg
->Len
,
1170 if (dma_mapping_error(&h
->pdev
->dev
, temp64
)) {
1171 /* prevent subsequent unmapping */
1172 chain_sg
->Addr
.lower
= 0;
1173 chain_sg
->Addr
.upper
= 0;
1176 chain_sg
->Addr
.lower
= (u32
) (temp64
& 0x0FFFFFFFFULL
);
1177 chain_sg
->Addr
.upper
= (u32
) ((temp64
>> 32) & 0x0FFFFFFFFULL
);
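/* SG chaining layout: when a request needs more than max_cmd_sg_entries
 * descriptors, the last embedded descriptor is repurposed as a chain
 * pointer (Ext = HPSA_SG_CHAIN) whose Addr/Len cover the preallocated
 * external block holding the remaining
 * SGTotal - max_cmd_sg_entries entries.
 */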
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get addition sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
				cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd_free(h, cp);
	cmd->scsi_done(cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}
*pdev
,
1424 struct CommandList
*cp
,
1431 if (buflen
== 0 || data_direction
== PCI_DMA_NONE
) {
1432 cp
->Header
.SGList
= 0;
1433 cp
->Header
.SGTotal
= 0;
1437 addr64
= (u64
) pci_map_single(pdev
, buf
, buflen
, data_direction
);
1438 if (dma_mapping_error(&pdev
->dev
, addr64
)) {
1439 /* Prevent subsequent unmap of something never mapped */
1440 cp
->Header
.SGList
= 0;
1441 cp
->Header
.SGTotal
= 0;
1444 cp
->SG
[0].Addr
.lower
=
1445 (u32
) (addr64
& (u64
) 0x00000000FFFFFFFF);
1446 cp
->SG
[0].Addr
.upper
=
1447 (u32
) ((addr64
>> 32) & (u64
) 0x00000000FFFFFFFF);
1448 cp
->SG
[0].Len
= buflen
;
1449 cp
->Header
.SGList
= (u8
) 1; /* no. SGs contig in this cmd */
1450 cp
->Header
.SGTotal
= (u16
) 1; /* total sgs in this cmd list */
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}
#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
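/* Retry pacing above: the first three retries are immediate; from the
 * fourth retry on, the sleep starts at 10ms and doubles each time
 * (10, 20, 40, ... ms), and stops doubling once the delay reaches the
 * one-second range.
 */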
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
			dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			unsigned char page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
			NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
*h
, int logical
,
1663 struct ReportLUNdata
*buf
, int bufsize
,
1664 int extended_response
)
1667 struct CommandList
*c
;
1668 unsigned char scsi3addr
[8];
1669 struct ErrorInfo
*ei
;
1671 c
= cmd_special_alloc(h
);
1672 if (c
== NULL
) { /* trouble... */
1673 dev_err(&h
->pdev
->dev
, "cmd_special_alloc returned NULL!\n");
1676 /* address the controller */
1677 memset(scsi3addr
, 0, sizeof(scsi3addr
));
1678 if (fill_cmd(c
, logical
? HPSA_REPORT_LOG
: HPSA_REPORT_PHYS
, h
,
1679 buf
, bufsize
, 0, scsi3addr
, TYPE_CMD
)) {
1683 if (extended_response
)
1684 c
->Request
.CDB
[1] = extended_response
;
1685 hpsa_scsi_do_simple_cmd_with_retry(h
, c
, PCI_DMA_FROMDEVICE
);
1687 if (ei
->CommandStatus
!= 0 &&
1688 ei
->CommandStatus
!= CMD_DATA_UNDERRUN
) {
1689 hpsa_scsi_interpret_error(c
);
1693 cmd_special_free(h
, c
);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}
static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
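/* Example of the lunid decoding above: an external target volume
 * reporting lunid 0x00020005 lands on bus 1, target 2, lun 5
 * ((lunid >> 16) & 0x3fff == 2, lunid & 0x00ff == 5), while an
 * ordinary logical volume with lunid 0x0005 becomes bus 0, target 0,
 * lun 5.
 */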
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
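/* LUNListLength in both report buffers is a big-endian byte count, so
 * the be32_to_cpu(...) / 8 conversions above turn it into a number of
 * 8-byte LUN entries before the capping checks are applied.
 */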
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
        int i, int nphysicals, int nlogicals,
        struct ReportLUNdata *physdev_list,
        struct ReportLUNdata *logdev_list)
{
        /* Helper function, figure out where the LUN ID info is coming from
         * given index i, lists of physical and logical devices, where in
         * the list the raid controller is supposed to appear (first or last)
         */
        int logicals_start = nphysicals + (raid_ctlr_position == 0);
        int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

        if (i == raid_ctlr_position)
                return RAID_CTLR_LUNID;

        if (i < logicals_start)
                return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

        if (i < last_device)
                return &logdev_list->LUN[i - nphysicals -
                        (raid_ctlr_position == 0)][0];
        BUG();
        return NULL;
}
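/*
 * Worked example (illustration only, values made up): with nphysicals = 2,
 * nlogicals = 3 and raid_ctlr_position == 0 (the SCSI rev 5 case), the
 * merged index space handed to the loop in hpsa_update_scsi_devices() is:
 *
 *   i = 0      -> RAID_CTLR_LUNID (the controller itself)
 *   i = 1..2   -> physdev_list->LUN[0..1]
 *   i = 3..5   -> logdev_list->LUN[0..2]
 *
 * With raid_ctlr_position == nphysicals + nlogicals (all other boards),
 * the controller instead occupies the last slot, i = 5, and no +1 offset
 * is applied to the physical and logical entries.
 */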
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
        /* the idea here is we could get notified
         * that some devices have changed, so we do a report
         * physical luns and report logical luns cmd, and adjust
         * our list of devices accordingly.
         *
         * The scsi3addr's of devices won't change so long as the
         * adapter is not reset.  That means we can rescan and
         * tell which devices we already know about, vs. new
         * devices, vs. disappearing devices.
         */
        struct ReportLUNdata *physdev_list = NULL;
        struct ReportLUNdata *logdev_list = NULL;
        u32 nphysicals = 0;
        u32 nlogicals = 0;
        u32 ndev_allocated = 0;
        struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
        int ncurrent = 0;
        int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
        int i, n_ext_target_devs, ndevs_to_allocate;
        int raid_ctlr_position;
        DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

        currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
        physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

        if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
                dev_err(&h->pdev->dev, "out of memory\n");
                goto out;
        }
        memset(lunzerobits, 0, sizeof(lunzerobits));

        if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
                        logdev_list, &nlogicals))
                goto out;

        /* We might see up to the maximum number of logical and physical disks
         * plus external target devices, and a device for the local RAID
         * controller.
         */
        ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

        /* Allocate the per device structures */
        for (i = 0; i < ndevs_to_allocate; i++) {
                if (i >= HPSA_MAX_DEVICES) {
                        dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
                                "  %d devices ignored.\n", HPSA_MAX_DEVICES,
                                ndevs_to_allocate - HPSA_MAX_DEVICES);
                        break;
                }
                currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
                if (!currentsd[i]) {
                        dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
                                __FILE__, __LINE__);
                        goto out;
                }
                ndev_allocated++;
        }

        if (unlikely(is_scsi_rev_5(h)))
                raid_ctlr_position = 0;
        else
                raid_ctlr_position = nphysicals + nlogicals;

        /* adjust our table of devices */
        n_ext_target_devs = 0;
        for (i = 0; i < nphysicals + nlogicals + 1; i++) {
                u8 *lunaddrbytes, is_OBDR = 0;

                /* Figure out where the LUN ID info is coming from */
                lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
                        i, nphysicals, nlogicals, physdev_list, logdev_list);
                /* skip masked physical devices. */
                if (lunaddrbytes[3] & 0xC0 &&
                        i < nphysicals + (raid_ctlr_position == 0))
                        continue;

                /* Get device type, vendor, model, device id */
                if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
                                &is_OBDR))
                        continue; /* skip it if we can't talk to it. */
                figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
                this_device = currentsd[ncurrent];

                /*
                 * For external target devices, we have to insert a LUN 0 which
                 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
                 * is nonetheless an enclosure device there.  We have to
                 * present that otherwise linux won't find anything if
                 * there is no lun 0.
                 */
                if (add_ext_target_dev(h, tmpdevice, this_device,
                                lunaddrbytes, lunzerobits,
                                &n_ext_target_devs)) {
                        ncurrent++;
                        this_device = currentsd[ncurrent];
                }

                *this_device = *tmpdevice;

                switch (this_device->devtype) {
                case TYPE_ROM:
                        /* We don't *really* support actual CD-ROM devices,
                         * just "One Button Disaster Recovery" tape drive
                         * which temporarily pretends to be a CD-ROM drive.
                         * So we check that the device is really an OBDR tape
                         * device by checking for "$DR-10" in bytes 43-48 of
                         * the inquiry data.
                         */
                        if (is_OBDR)
                                ncurrent++;
                        break;
                case TYPE_DISK:
                        if (i < nphysicals)
                                break;
                        ncurrent++;
                        break;
                case TYPE_TAPE:
                case TYPE_MEDIUM_CHANGER:
                        ncurrent++;
                        break;
                case TYPE_RAID:
                        /* Only present the Smartarray HBA as a RAID controller.
                         * If it's a RAID controller other than the HBA itself
                         * (an external RAID controller, MSA500 or similar)
                         * don't present it.
                         */
                        if (!is_hba_lunid(lunaddrbytes))
                                break;
                        ncurrent++;
                        break;
                default:
                        break;
                }
                if (ncurrent >= HPSA_MAX_DEVICES)
                        break;
        }
        adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
        kfree(tmpdevice);
        for (i = 0; i < ndev_allocated; i++)
                kfree(currentsd[i]);
        kfree(currentsd);
        kfree(physdev_list);
        kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
        struct CommandList *cp,
        struct scsi_cmnd *cmd)
{
        unsigned int len;
        struct scatterlist *sg;
        u64 addr64;
        int use_sg, i, sg_index, chained;
        struct SGDescriptor *curr_sg;

        BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

        use_sg = scsi_dma_map(cmd);
        if (use_sg < 0)
                return use_sg;

        if (!use_sg)
                goto sglist_finished;

        curr_sg = cp->SG;
        chained = 0;
        sg_index = 0;
        scsi_for_each_sg(cmd, sg, use_sg, i) {
                if (i == h->max_cmd_sg_entries - 1 &&
                        use_sg > h->max_cmd_sg_entries) {
                        chained = 1;
                        curr_sg = h->cmd_sg_list[cp->cmdindex];
                        sg_index = 0;
                }
                addr64 = (u64) sg_dma_address(sg);
                len = sg_dma_len(sg);
                curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
                curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
                curr_sg->Len = len;
                curr_sg->Ext = 0;  /* we are not chaining */
                curr_sg++;
        }

        if (use_sg + chained > h->maxSG)
                h->maxSG = use_sg + chained;

        if (chained) {
                cp->Header.SGList = h->max_cmd_sg_entries;
                cp->Header.SGTotal = (u16) (use_sg + 1);
                if (hpsa_map_sg_chain_block(h, cp)) {
                        scsi_dma_unmap(cmd);
                        return -1;
                }
                return 0;
        }

sglist_finished:

        cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
        return 0;
}
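/*
 * Illustration (not part of the driver): each SG descriptor above splits a
 * 64-bit DMA address into two 32-bit halves for the controller.  A
 * standalone sketch of the split and its reassembly, with a made-up
 * address value:
 */
#if 0   /* standalone sketch */
#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t addr64 = 0x123456789abcdef0ULL;
        uint32_t lower = (uint32_t)(addr64 & 0x0FFFFFFFFULL);
        uint32_t upper = (uint32_t)((addr64 >> 32) & 0x0FFFFFFFFULL);

        /* the controller-side view: reassemble and compare */
        assert((((uint64_t)upper << 32) | lower) == addr64);
        return 0;
}
#endif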
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
        void (*done)(struct scsi_cmnd *))
{
        struct ctlr_info *h;
        struct hpsa_scsi_dev_t *dev;
        unsigned char scsi3addr[8];
        struct CommandList *c;
        unsigned long flags;

        /* Get the ptr to our adapter structure out of cmd->host. */
        h = sdev_to_hba(cmd->device);
        dev = cmd->device->hostdata;
        if (!dev) {
                cmd->result = DID_NO_CONNECT << 16;
                done(cmd);
                return 0;
        }
        memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

        spin_lock_irqsave(&h->lock, flags);
        if (unlikely(h->lockup_detected)) {
                spin_unlock_irqrestore(&h->lock, flags);
                cmd->result = DID_ERROR << 16;
                done(cmd);
                return 0;
        }
        spin_unlock_irqrestore(&h->lock, flags);
        c = cmd_alloc(h);
        if (c == NULL) {                /* trouble... */
                dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        /* Fill in the command list header */

        cmd->scsi_done = done; /* save this for use by completion code */

        /* save c in case we have to abort it */
        cmd->host_scribble = (unsigned char *) c;

        c->cmd_type = CMD_SCSI;
        c->scsi_cmd = cmd;
        c->Header.ReplyQueue = 0; /* unused in simple mode */
        memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
        c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
        c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

        /* Fill in the request block... */

        c->Request.Timeout = 0;
        memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
        BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
        c->Request.CDBLen = cmd->cmd_len;
        memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
        c->Request.Type.Type = TYPE_CMD;
        c->Request.Type.Attribute = ATTR_SIMPLE;
        switch (cmd->sc_data_direction) {
        case DMA_TO_DEVICE:
                c->Request.Type.Direction = XFER_WRITE;
                break;
        case DMA_FROM_DEVICE:
                c->Request.Type.Direction = XFER_READ;
                break;
        case DMA_NONE:
                c->Request.Type.Direction = XFER_NONE;
                break;
        case DMA_BIDIRECTIONAL:
                /* This can happen if a buggy application does a scsi passthru
                 * and sets both inlen and outlen to non-zero. ( see
                 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
                 */
                c->Request.Type.Direction = XFER_RSVD;
                /* This is technically wrong, and hpsa controllers should
                 * reject it with CMD_INVALID, which is the most correct
                 * response, but non-fibre backends appear to let it
                 * slide by, and give the same results as if this field
                 * were set correctly.  Either way is acceptable for
                 * our purposes here.
                 */
                break;
        default:
                dev_err(&h->pdev->dev, "unknown data direction: %d\n",
                        cmd->sc_data_direction);
                BUG();
                break;
        }

        if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
                cmd_free(h, c);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        enqueue_cmd_and_start_io(h, c);
        /* the cmd'll come back via intr handler in complete_scsi_command() */
        return 0;
}

static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
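/*
 * Illustration (not part of the driver): the Tag.lower value built in
 * hpsa_scsi_queue_command_lck() encodes the command pool index shifted
 * left by DIRECT_LOOKUP_SHIFT with DIRECT_LOOKUP_BIT set, so the
 * completion path can recover the index with a single shift (see
 * hpsa_tag_to_index() later in this file).  The shift and bit values
 * below are example values chosen for the sketch, not necessarily the
 * driver's real constants:
 */
#if 0   /* standalone sketch */
#include <stdint.h>
#include <assert.h>

#define EX_DIRECT_LOOKUP_SHIFT  4
#define EX_DIRECT_LOOKUP_BIT    0x08

int main(void)
{
        uint32_t cmdindex = 37;
        uint32_t tag = (cmdindex << EX_DIRECT_LOOKUP_SHIFT) |
                        EX_DIRECT_LOOKUP_BIT;

        assert(tag & EX_DIRECT_LOOKUP_BIT);             /* "indexed" tag */
        assert((tag >> EX_DIRECT_LOOKUP_SHIFT) == cmdindex);
        return 0;
}
#endif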
static void hpsa_scan_start(struct Scsi_Host *sh)
{
        struct ctlr_info *h = shost_to_hba(sh);
        unsigned long flags;

        /* wait until any scan already in progress is finished. */
        while (1) {
                spin_lock_irqsave(&h->scan_lock, flags);
                if (h->scan_finished)
                        break;
                spin_unlock_irqrestore(&h->scan_lock, flags);
                wait_event(h->scan_wait_queue, h->scan_finished);
                /* Note: We don't need to worry about a race between this
                 * thread and driver unload because the midlayer will
                 * have incremented the reference count, so unload won't
                 * happen if we're in here.
                 */
        }
        h->scan_finished = 0; /* mark scan as in progress */
        spin_unlock_irqrestore(&h->scan_lock, flags);

        hpsa_update_scsi_devices(h, h->scsi_host->host_no);

        spin_lock_irqsave(&h->scan_lock, flags);
        h->scan_finished = 1; /* mark scan as finished. */
        wake_up_all(&h->scan_wait_queue);
        spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time)
{
        struct ctlr_info *h = shost_to_hba(sh);
        unsigned long flags;
        int finished;

        spin_lock_irqsave(&h->scan_lock, flags);
        finished = h->scan_finished;
        spin_unlock_irqrestore(&h->scan_lock, flags);
        return finished;
}
static int hpsa_change_queue_depth(struct scsi_device *sdev,
        int qdepth, int reason)
{
        struct ctlr_info *h = sdev_to_hba(sdev);

        if (reason != SCSI_QDEPTH_DEFAULT)
                return -ENOTSUPP;

        if (qdepth < 1)
                qdepth = 1;
        else
                if (qdepth > h->nr_cmds)
                        qdepth = h->nr_cmds;
        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
        return sdev->queue_depth;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
        /* we are being forcibly unloaded, and may not refuse. */
        scsi_remove_host(h->scsi_host);
        scsi_host_put(h->scsi_host);
        h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
        struct Scsi_Host *sh;
        int error;

        sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
        if (sh == NULL)
                goto fail;

        sh->io_port = 0;
        sh->n_io_port = 0;
        sh->this_id = -1;
        sh->max_channel = 3;
        sh->max_cmd_len = MAX_COMMAND_SIZE;
        sh->max_lun = HPSA_MAX_LUN;
        sh->max_id = HPSA_MAX_LUN;
        sh->can_queue = h->nr_cmds;
        sh->cmd_per_lun = h->nr_cmds;
        sh->sg_tablesize = h->maxsgentries;
        h->scsi_host = sh;
        sh->hostdata[0] = (unsigned long) h;
        sh->irq = h->intr[h->intr_mode];
        sh->unique_id = sh->irq;
        error = scsi_add_host(sh, &h->pdev->dev);
        if (error)
                goto fail_host_put;
        scsi_scan_host(sh);
        return 0;

fail_host_put:
        dev_err(&h->pdev->dev, "%s: scsi_add_host"
                " failed for controller %d\n", __func__, h->ctlr);
        scsi_host_put(sh);
        return error;
fail:
        dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
                " failed for controller %d\n", __func__, h->ctlr);
        return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
        unsigned char lunaddr[])
{
        int rc = 0;
        int count = 0;
        int waittime = 1; /* seconds */
        struct CommandList *c;

        c = cmd_special_alloc(h);
        if (!c) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "wait_for_device_to_become_ready.\n");
                return IO_ERROR;
        }

        /* Send test unit ready until device ready, or give up. */
        while (count < HPSA_TUR_RETRY_LIMIT) {

                /* Wait for a bit.  do this first, because if we send
                 * the TUR right away, the reset will just abort it.
                 */
                msleep(1000 * waittime);
                count++;

                /* Increase wait time with each try, up to a point. */
                if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
                        waittime = waittime * 2;

                /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
                (void) fill_cmd(c, TEST_UNIT_READY, h,
                        NULL, 0, 0, lunaddr, TYPE_CMD);
                hpsa_scsi_do_simple_cmd_core(h, c);
                /* no unmap needed here because no data xfer. */

                if (c->err_info->CommandStatus == CMD_SUCCESS)
                        break;

                if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
                        c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
                        (c->err_info->SenseInfo[2] == NO_SENSE ||
                        c->err_info->SenseInfo[2] == UNIT_ATTENTION))
                        break;

                dev_warn(&h->pdev->dev, "waiting %d secs "
                        "for device to become ready.\n", waittime);
                rc = 1; /* device not ready. */
        }

        if (rc)
                dev_warn(&h->pdev->dev, "giving up on device.\n");
        else
                dev_warn(&h->pdev->dev, "device is ready.\n");

        cmd_special_free(h, c);
        return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
        int rc;
        struct ctlr_info *h;
        struct hpsa_scsi_dev_t *dev;

        /* find the controller to which the command to be aborted was sent */
        h = sdev_to_hba(scsicmd->device);
        if (h == NULL) /* paranoia */
                return FAILED;
        dev = scsicmd->device->hostdata;
        if (!dev) {
                dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
                        "device lookup failed.\n");
                return FAILED;
        }
        dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
                h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
        /* send a reset to the SCSI LUN which the command was sent to */
        rc = hpsa_send_reset(h, dev->scsi3addr);
        if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
                return SUCCESS;

        dev_warn(&h->pdev->dev, "resetting device failed.\n");
        return FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
        u8 original_tag[8];

        memcpy(original_tag, tag, 8);
        tag[0] = original_tag[3];
        tag[1] = original_tag[2];
        tag[2] = original_tag[1];
        tag[3] = original_tag[0];
        tag[4] = original_tag[7];
        tag[5] = original_tag[6];
        tag[6] = original_tag[5];
        tag[7] = original_tag[4];
}
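/*
 * Illustration (not part of the driver): swizzle_abort_tag() byte-swaps
 * each 32-bit half of the 8-byte tag independently, and applying it twice
 * is a no-op.  A standalone before/after sketch with made-up tag bytes:
 */
#if 0   /* standalone sketch */
#include <stdint.h>
#include <string.h>
#include <assert.h>

static void swizzle(uint8_t *tag)
{
        uint8_t o[8];

        memcpy(o, tag, 8);
        tag[0] = o[3]; tag[1] = o[2]; tag[2] = o[1]; tag[3] = o[0];
        tag[4] = o[7]; tag[5] = o[6]; tag[6] = o[5]; tag[7] = o[4];
}

int main(void)
{
        uint8_t tag[8]  = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
        uint8_t want[8] = { 0x44, 0x33, 0x22, 0x11, 0x88, 0x77, 0x66, 0x55 };

        swizzle(tag);
        assert(memcmp(tag, want, 8) == 0);
        swizzle(tag);                   /* swizzling twice restores it */
        assert(tag[0] == 0x11 && tag[7] == 0x88);
        return 0;
}
#endif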
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
        struct CommandList *abort, int swizzle)
{
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);
        if (c == NULL) {        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -ENOMEM;
        }

        /* fill_cmd can't fail here, no buffer to map */
        (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
                0, 0, scsi3addr, TYPE_MSG);
        if (swizzle)
                swizzle_abort_tag(&c->Request.CDB[4]);
        hpsa_scsi_do_simple_cmd_core(h, c);
        dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
                __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
        /* no unmap needed here because no data xfer. */

        ei = c->err_info;
        switch (ei->CommandStatus) {
        case CMD_SUCCESS:
                break;
        case CMD_UNABORTABLE: /* Very common, don't make noise. */
                rc = -1;
                break;
        default:
                dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
                        __func__, abort->Header.Tag.upper,
                        abort->Header.Tag.lower);
                hpsa_scsi_interpret_error(c);
                rc = -1;
                break;
        }
        cmd_special_free(h, c);
        dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
                abort->Header.Tag.upper, abort->Header.Tag.lower);
        return rc;
}
/*
 * hpsa_find_cmd_in_queue
 *
 * Used to determine whether a command (find) is still present
 * in queue_head.  Optionally excludes the last element of queue_head.
 *
 * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
 * not yet been submitted, and so can be aborted by the driver without
 * sending an abort to the hardware.
 *
 * Returns pointer to command if found in queue, NULL otherwise.
 */
static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
        struct scsi_cmnd *find, struct list_head *queue_head)
{
        unsigned long flags;
        struct CommandList *c = NULL; /* ptr into cmpQ */

        if (!find)
                return NULL;
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(c, queue_head, list) {
                if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
                        continue;
                if (c->scsi_cmd == find) {
                        spin_unlock_irqrestore(&h->lock, flags);
                        return c;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        return NULL;
}
static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
        u8 *tag, struct list_head *queue_head)
{
        unsigned long flags;
        struct CommandList *c;

        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(c, queue_head, list) {
                if (memcmp(&c->Header.Tag, tag, 8) != 0)
                        continue;
                spin_unlock_irqrestore(&h->lock, flags);
                return c;
        }
        spin_unlock_irqrestore(&h->lock, flags);
        return NULL;
}
/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
 * tell which kind we're dealing with, so we send the abort both ways.  There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
 * way we construct our tags but we check anyway in case the assumptions which
 * make this true someday become false.
 */
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
        unsigned char *scsi3addr, struct CommandList *abort)
{
        u8 swizzled_tag[8];
        struct CommandList *c;
        int rc = 0, rc2 = 0;

        /* we do not expect to find the swizzled tag in our queue, but
         * check anyway just to be sure the assumptions which make this
         * the case haven't become wrong.
         */
        memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
        swizzle_abort_tag(swizzled_tag);
        c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
        if (c != NULL) {
                dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
                return hpsa_send_abort(h, scsi3addr, abort, 0);
        }
        rc = hpsa_send_abort(h, scsi3addr, abort, 0);

        /* if the command is still in our queue, we can't conclude that it was
         * aborted (it might have just completed normally) but in any case
         * we don't need to try to abort it another way.
         */
        c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
        if (c)
                rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
        return rc && rc2;
}
/* Send an abort for the specified command.
 *      If the device and controller support it,
 *              send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
        int i, rc;
        struct ctlr_info *h;
        struct hpsa_scsi_dev_t *dev;
        struct CommandList *abort; /* pointer to command to be aborted */
        struct CommandList *found;
        struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
        char msg[256];          /* For debug messaging. */
        int ml = 0;

        /* Find the controller of the command to be aborted */
        h = sdev_to_hba(sc->device);
        if (WARN(h == NULL,
                        "ABORT REQUEST FAILED, Controller lookup failed.\n"))
                return FAILED;

        /* Check that controller supports some kind of task abort */
        if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
                !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
                return FAILED;

        memset(msg, 0, sizeof(msg));
        ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
                h->scsi_host->host_no, sc->device->channel,
                sc->device->id, sc->device->lun);

        /* Find the device of the command to be aborted */
        dev = sc->device->hostdata;
        if (!dev) {
                dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
                        msg);
                return FAILED;
        }

        /* Get SCSI command to be aborted */
        abort = (struct CommandList *) sc->host_scribble;
        if (abort == NULL) {
                dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
                        msg);
                return FAILED;
        }

        ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
                abort->Header.Tag.upper, abort->Header.Tag.lower);
        as = (struct scsi_cmnd *) abort->scsi_cmd;
        if (as != NULL)
                ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
                        as->cmnd[0], as->serial_number);
        dev_dbg(&h->pdev->dev, "%s\n", msg);
        dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
                h->scsi_host->host_no, dev->bus, dev->target, dev->lun);

        /* Search reqQ to See if command is queued but not submitted,
         * if so, complete the command with aborted status and remove
         * it from the reqQ.
         */
        found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
        if (found) {
                found->err_info->CommandStatus = CMD_ABORTED;
                finish_cmd(found);
                dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
                        msg);
                return SUCCESS;
        }

        /* not in reqQ, if also not in cmpQ, must have already completed */
        found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
        if (!found) {
                dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
                        msg);
                return SUCCESS;
        }

        /*
         * Command is in flight, or possibly already completed
         * by the firmware (but not to the scsi mid layer) but we can't
         * distinguish which.  Send the abort down.
         */
        rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
        if (rc != 0) {
                dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
                dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
                        h->scsi_host->host_no,
                        dev->bus, dev->target, dev->lun);
                return FAILED;
        }
        dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

        /* If the abort(s) above completed and actually aborted the
         * command, then the command to be aborted should already be
         * completed.  If not, wait around a bit more to see if they
         * manage to complete normally.
         */
#define ABORT_COMPLETE_WAIT_SECS 30
        for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
                found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
                if (!found)
                        return SUCCESS;
                msleep(100);
        }
        dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
                msg, ABORT_COMPLETE_WAIT_SECS);
        return FAILED;
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
        struct CommandList *c;
        int i;
        union u64bit temp64;
        dma_addr_t cmd_dma_handle, err_dma_handle;
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        do {
                i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
                if (i == h->nr_cmds) {
                        spin_unlock_irqrestore(&h->lock, flags);
                        return NULL;
                }
        } while (test_and_set_bit
                (i & (BITS_PER_LONG - 1),
                h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
        spin_unlock_irqrestore(&h->lock, flags);

        c = h->cmd_pool + i;
        memset(c, 0, sizeof(*c));
        cmd_dma_handle = h->cmd_pool_dhandle
                + i * sizeof(*c);
        c->err_info = h->errinfo_pool + i;
        memset(c->err_info, 0, sizeof(*c->err_info));
        err_dma_handle = h->errinfo_pool_dhandle
                + i * sizeof(*c->err_info);

        c->cmdindex = i;

        INIT_LIST_HEAD(&c->list);
        c->busaddr = (u32) cmd_dma_handle;
        temp64.val = (u64) err_dma_handle;
        c->ErrDesc.Addr.lower = temp64.val32.lower;
        c->ErrDesc.Addr.upper = temp64.val32.upper;
        c->ErrDesc.Len = sizeof(*c->err_info);

        c->h = h;
        return c;
}
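/*
 * Illustration (not part of the driver): cmd_alloc() above is a classic
 * bitmap allocator -- find a zero bit, then atomically test-and-set it and
 * retry if another CPU won the race.  A simplified, single-threaded
 * userspace sketch of the same find/mark/free cycle (no locking or
 * atomics shown; names and pool size are invented for the example):
 */
#if 0   /* standalone sketch */
#include <stdio.h>

#define NR_CMDS 8
static unsigned long pool_bits;         /* one bit per command slot */

static int slot_alloc(void)
{
        int i;

        for (i = 0; i < NR_CMDS; i++)
                if (!(pool_bits & (1UL << i))) {
                        pool_bits |= 1UL << i;  /* mark slot in use */
                        return i;
                }
        return -1;                              /* pool exhausted */
}

static void slot_free(int i)
{
        pool_bits &= ~(1UL << i);               /* mark slot free again */
}

int main(void)
{
        int a = slot_alloc(), b = slot_alloc();

        printf("got slots %d and %d\n", a, b);  /* 0 and 1 */
        slot_free(a);
        printf("got slot %d\n", slot_alloc());  /* 0 again */
        return 0;
}
#endif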
/* For operations that can wait for kmalloc to possibly sleep,
 * this routine can be called. Lock need not be held to call
 * cmd_special_alloc. cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
        struct CommandList *c;
        union u64bit temp64;
        dma_addr_t cmd_dma_handle, err_dma_handle;

        c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
        if (c == NULL)
                return NULL;
        memset(c, 0, sizeof(*c));

        c->cmdindex = -1;

        c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
                &err_dma_handle);

        if (c->err_info == NULL) {
                pci_free_consistent(h->pdev,
                        sizeof(*c), c, cmd_dma_handle);
                return NULL;
        }
        memset(c->err_info, 0, sizeof(*c->err_info));

        INIT_LIST_HEAD(&c->list);
        c->busaddr = (u32) cmd_dma_handle;
        temp64.val = (u64) err_dma_handle;
        c->ErrDesc.Addr.lower = temp64.val32.lower;
        c->ErrDesc.Addr.upper = temp64.val32.upper;
        c->ErrDesc.Len = sizeof(*c->err_info);

        c->h = h;
        return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
        int i;
        unsigned long flags;

        i = c - h->cmd_pool;
        spin_lock_irqsave(&h->lock, flags);
        clear_bit(i & (BITS_PER_LONG - 1),
                h->cmd_pool_bits + (i / BITS_PER_LONG));
        spin_unlock_irqrestore(&h->lock, flags);
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
        union u64bit temp64;

        temp64.val32.lower = c->ErrDesc.Addr.lower;
        temp64.val32.upper = c->ErrDesc.Addr.upper;
        pci_free_consistent(h->pdev, sizeof(*c->err_info),
                c->err_info, (dma_addr_t) temp64.val);
        pci_free_consistent(h->pdev, sizeof(*c),
                c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
        IOCTL32_Command_struct __user *arg32 =
                (IOCTL32_Command_struct __user *) arg;
        IOCTL_Command_struct arg64;
        IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
        int err;
        u32 cp;

        memset(&arg64, 0, sizeof(arg64));
        err = 0;
        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
                sizeof(arg64.LUN_info));
        err |= copy_from_user(&arg64.Request, &arg32->Request,
                sizeof(arg64.Request));
        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
                sizeof(arg64.error_info));
        err |= get_user(arg64.buf_size, &arg32->buf_size);
        err |= get_user(cp, &arg32->buf);
        arg64.buf = compat_ptr(cp);
        err |= copy_to_user(p, &arg64, sizeof(arg64));

        if (err)
                return -EFAULT;

        err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
        if (err)
                return err;
        err |= copy_in_user(&arg32->error_info, &p->error_info,
                sizeof(arg32->error_info));
        if (err)
                return -EFAULT;
        return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
        int cmd, void *arg)
{
        BIG_IOCTL32_Command_struct __user *arg32 =
                (BIG_IOCTL32_Command_struct __user *) arg;
        BIG_IOCTL_Command_struct arg64;
        BIG_IOCTL_Command_struct __user *p =
                compat_alloc_user_space(sizeof(arg64));
        int err;
        u32 cp;

        memset(&arg64, 0, sizeof(arg64));
        err = 0;
        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
                sizeof(arg64.LUN_info));
        err |= copy_from_user(&arg64.Request, &arg32->Request,
                sizeof(arg64.Request));
        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
                sizeof(arg64.error_info));
        err |= get_user(arg64.buf_size, &arg32->buf_size);
        err |= get_user(arg64.malloc_size, &arg32->malloc_size);
        err |= get_user(cp, &arg32->buf);
        arg64.buf = compat_ptr(cp);
        err |= copy_to_user(p, &arg64, sizeof(arg64));

        if (err)
                return -EFAULT;

        err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
        if (err)
                return err;
        err |= copy_in_user(&arg32->error_info, &p->error_info,
                sizeof(arg32->error_info));
        if (err)
                return -EFAULT;
        return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
        switch (cmd) {
        case CCISS_GETPCIINFO:
        case CCISS_GETINTINFO:
        case CCISS_SETINTINFO:
        case CCISS_GETNODENAME:
        case CCISS_SETNODENAME:
        case CCISS_GETHEARTBEAT:
        case CCISS_GETBUSTYPES:
        case CCISS_GETFIRMVER:
        case CCISS_GETDRIVVER:
        case CCISS_REVALIDVOLS:
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
        case CCISS_REGNEWD:
        case CCISS_RESCANDISK:
        case CCISS_GETLUNINFO:
                return hpsa_ioctl(dev, cmd, arg);

        case CCISS_PASSTHRU32:
                return hpsa_ioctl32_passthru(dev, cmd, arg);
        case CCISS_BIG_PASSTHRU32:
                return hpsa_ioctl32_big_passthru(dev, cmd, arg);

        default:
                return -ENOIOCTLCMD;
        }
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
        struct hpsa_pci_info pciinfo;

        if (!argp)
                return -EINVAL;
        pciinfo.domain = pci_domain_nr(h->pdev->bus);
        pciinfo.bus = h->pdev->bus->number;
        pciinfo.dev_fn = h->pdev->devfn;
        pciinfo.board_id = h->board_id;
        if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
                return -EFAULT;
        return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
        DriverVer_type DriverVer;
        unsigned char vmaj, vmin, vsubmin;
        int rc;

        rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
                &vmaj, &vmin, &vsubmin);
        if (rc != 3) {
                dev_info(&h->pdev->dev, "driver version string '%s' "
                        "unrecognized.", HPSA_DRIVER_VERSION);
                vmaj = 0;
                vmin = 0;
                vsubmin = 0;
        }
        DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
        if (!argp)
                return -EINVAL;
        if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
                return -EFAULT;
        return 0;
}
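/*
 * Illustration (not part of the driver): the DriverVer word above packs
 * "major.minor.subminor" into one integer, one byte per component.  A
 * standalone sketch of the packing, using the current version string's
 * components as sample values:
 */
#if 0   /* standalone sketch */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned char vmaj = 3, vmin = 4, vsubmin = 0;  /* "3.4.0" */
        uint32_t ver = (vmaj << 16) | (vmin << 8) | vsubmin;

        printf("0x%06x\n", ver);        /* prints 0x030400 */
        return 0;
}
#endif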
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
        IOCTL_Command_struct iocommand;
        struct CommandList *c;
        char *buff = NULL;
        union u64bit temp64;
        int rc = 0;

        if (!argp)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
                return -EFAULT;
        if ((iocommand.buf_size < 1) &&
                        (iocommand.Request.Type.Direction != XFER_NONE)) {
                return -EINVAL;
        }
        if (iocommand.buf_size > 0) {
                buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
                if (buff == NULL)
                        return -EFAULT;
                if (iocommand.Request.Type.Direction == XFER_WRITE) {
                        /* Copy the data into the buffer we created */
                        if (copy_from_user(buff, iocommand.buf,
                                        iocommand.buf_size)) {
                                rc = -EFAULT;
                                goto out_kfree;
                        }
                } else {
                        memset(buff, 0, iocommand.buf_size);
                }
        }
        c = cmd_special_alloc(h);
        if (c == NULL) {
                rc = -ENOMEM;
                goto out_kfree;
        }
        /* Fill in the command type */
        c->cmd_type = CMD_IOCTL_PEND;
        /* Fill in Command Header */
        c->Header.ReplyQueue = 0; /* unused in simple mode */
        if (iocommand.buf_size > 0) { /* buffer to fill */
                c->Header.SGList = 1;
                c->Header.SGTotal = 1;
        } else { /* no buffers to fill */
                c->Header.SGList = 0;
                c->Header.SGTotal = 0;
        }
        memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
        /* use the kernel address of the cmd block for tag */
        c->Header.Tag.lower = c->busaddr;

        /* Fill in Request block */
        memcpy(&c->Request, &iocommand.Request,
                sizeof(c->Request));

        /* Fill in the scatter gather information */
        if (iocommand.buf_size > 0) {
                temp64.val = pci_map_single(h->pdev, buff,
                        iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
                        c->SG[0].Addr.lower = 0;
                        c->SG[0].Addr.upper = 0;
                        c->SG[0].Len = 0;
                        rc = -ENOMEM;
                        goto out;
                }
                c->SG[0].Addr.lower = temp64.val32.lower;
                c->SG[0].Addr.upper = temp64.val32.upper;
                c->SG[0].Len = iocommand.buf_size;
                c->SG[0].Ext = 0; /* we are not chaining*/
        }
        hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
        if (iocommand.buf_size > 0)
                hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);

        /* Copy the error information out */
        memcpy(&iocommand.error_info, c->err_info,
                sizeof(iocommand.error_info));
        if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
                rc = -EFAULT;
                goto out;
        }
        if (iocommand.Request.Type.Direction == XFER_READ &&
                        iocommand.buf_size > 0) {
                /* Copy the data out of the buffer we created */
                if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
                        rc = -EFAULT;
                        goto out;
                }
        }
out:
        cmd_special_free(h, c);
out_kfree:
        kfree(buff);
        return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
        BIG_IOCTL_Command_struct *ioc;
        struct CommandList *c;
        unsigned char **buff = NULL;
        int *buff_size = NULL;
        union u64bit temp64;
        BYTE sg_used = 0;
        int status = 0;
        int i;
        u32 left;
        u32 sz;
        BYTE __user *data_ptr;

        if (!argp)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        ioc = (BIG_IOCTL_Command_struct *)
                kmalloc(sizeof(*ioc), GFP_KERNEL);
        if (!ioc) {
                status = -ENOMEM;
                goto cleanup1;
        }
        if (copy_from_user(ioc, argp, sizeof(*ioc))) {
                status = -EFAULT;
                goto cleanup1;
        }
        if ((ioc->buf_size < 1) &&
                        (ioc->Request.Type.Direction != XFER_NONE)) {
                status = -EINVAL;
                goto cleanup1;
        }
        /* Check kmalloc limits using all SGs */
        if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
                status = -EINVAL;
                goto cleanup1;
        }
        if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
                status = -EINVAL;
                goto cleanup1;
        }
        buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
        if (!buff) {
                status = -ENOMEM;
                goto cleanup1;
        }
        buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
        if (!buff_size) {
                status = -ENOMEM;
                goto cleanup1;
        }
        left = ioc->buf_size;
        data_ptr = ioc->buf;
        while (left) {
                sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
                buff_size[sg_used] = sz;
                buff[sg_used] = kmalloc(sz, GFP_KERNEL);
                if (buff[sg_used] == NULL) {
                        status = -ENOMEM;
                        goto cleanup1;
                }
                if (ioc->Request.Type.Direction == XFER_WRITE) {
                        if (copy_from_user(buff[sg_used], data_ptr, sz)) {
                                status = -EFAULT;
                                goto cleanup1;
                        }
                } else
                        memset(buff[sg_used], 0, sz);
                left -= sz;
                data_ptr += sz;
                sg_used++;
        }
        c = cmd_special_alloc(h);
        if (c == NULL) {
                status = -ENOMEM;
                goto cleanup1;
        }
        c->cmd_type = CMD_IOCTL_PEND;
        c->Header.ReplyQueue = 0;
        c->Header.SGList = c->Header.SGTotal = sg_used;
        memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
        c->Header.Tag.lower = c->busaddr;
        memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
        if (ioc->buf_size > 0) {
                for (i = 0; i < sg_used; i++) {
                        temp64.val = pci_map_single(h->pdev, buff[i],
                                buff_size[i], PCI_DMA_BIDIRECTIONAL);
                        if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
                                c->SG[i].Addr.lower = 0;
                                c->SG[i].Addr.upper = 0;
                                c->SG[i].Len = 0;
                                hpsa_pci_unmap(h->pdev, c, i,
                                        PCI_DMA_BIDIRECTIONAL);
                                status = -ENOMEM;
                                goto cleanup1;
                        }
                        c->SG[i].Addr.lower = temp64.val32.lower;
                        c->SG[i].Addr.upper = temp64.val32.upper;
                        c->SG[i].Len = buff_size[i];
                        /* we are not chaining */
                        c->SG[i].Ext = 0;
                }
        }
        hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
        if (sg_used)
                hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
        /* Copy the error information out */
        memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
        if (copy_to_user(argp, ioc, sizeof(*ioc))) {
                cmd_special_free(h, c);
                status = -EFAULT;
                goto cleanup1;
        }
        if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
                /* Copy the data out of the buffer we created */
                BYTE __user *ptr = ioc->buf;
                for (i = 0; i < sg_used; i++) {
                        if (copy_to_user(ptr, buff[i], buff_size[i])) {
                                cmd_special_free(h, c);
                                status = -EFAULT;
                                goto cleanup1;
                        }
                        ptr += buff_size[i];
                }
        }
        cmd_special_free(h, c);
        status = 0;
cleanup1:
        if (buff) {
                for (i = 0; i < sg_used; i++)
                        kfree(buff[i]);
                kfree(buff);
        }
        kfree(buff_size);
        kfree(ioc);
        return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
                        c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
                (void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
        struct ctlr_info *h;
        void __user *argp = (void __user *)arg;

        h = sdev_to_hba(dev);

        switch (cmd) {
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
        case CCISS_REGNEWD:
                hpsa_scan_start(h->scsi_host);
                return 0;
        case CCISS_GETPCIINFO:
                return hpsa_getpciinfo_ioctl(h, argp);
        case CCISS_GETDRIVVER:
                return hpsa_getdrivver_ioctl(h, argp);
        case CCISS_PASSTHRU:
                return hpsa_passthru_ioctl(h, argp);
        case CCISS_BIG_PASSTHRU:
                return hpsa_big_passthru_ioctl(h, argp);
        default:
                return -ENOTTY;
        }
}
static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        u8 reset_type)
{
        struct CommandList *c;

        c = cmd_alloc(h);
        if (!c)
                return -ENOMEM;
        /* fill_cmd can't fail here, no data buffer to map */
        (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
                RAID_CTLR_LUNID, TYPE_MSG);
        c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
        c->waiting = NULL;
        enqueue_cmd_and_start_io(h, c);
        /* Don't wait for completion, the reset won't complete.  Don't free
         * the command either.  This is the last command we will send before
         * re-initializing everything, so it doesn't matter and won't leak.
         */
        return 0;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type)
{
        int pci_dir = XFER_NONE;
        struct CommandList *a; /* for commands to be aborted */

        c->cmd_type = CMD_IOCTL_PEND;
        c->Header.ReplyQueue = 0;
        if (buff != NULL && size > 0) {
                c->Header.SGList = 1;
                c->Header.SGTotal = 1;
        } else {
                c->Header.SGList = 0;
                c->Header.SGTotal = 0;
        }
        c->Header.Tag.lower = c->busaddr;
        memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

        c->Request.Type.Type = cmd_type;
        if (cmd_type == TYPE_CMD) {
                switch (cmd) {
                case HPSA_INQUIRY:
                        /* are we trying to read a vital product page */
                        if (page_code != 0) {
                                c->Request.CDB[1] = 0x01;
                                c->Request.CDB[2] = page_code;
                        }
                        c->Request.CDBLen = 6;
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_READ;
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = HPSA_INQUIRY;
                        c->Request.CDB[4] = size & 0xFF;
                        break;
                case HPSA_REPORT_LOG:
                case HPSA_REPORT_PHYS:
                        /* Talking to controller so It's a physical command
                           mode = 00 target = 0.  Nothing to write.
                         */
                        c->Request.CDBLen = 12;
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_READ;
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = cmd;
                        c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
                        c->Request.CDB[7] = (size >> 16) & 0xFF;
                        c->Request.CDB[8] = (size >> 8) & 0xFF;
                        c->Request.CDB[9] = size & 0xFF;
                        break;
                case HPSA_CACHE_FLUSH:
                        c->Request.CDBLen = 12;
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_WRITE;
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
                        c->Request.CDB[7] = (size >> 8) & 0xFF;
                        c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_NONE;
                        c->Request.Timeout = 0;
                        break;
                default:
                        dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
                        BUG();
                        return -1;
                }
        } else if (cmd_type == TYPE_MSG) {
                switch (cmd) {

                case HPSA_DEVICE_RESET_MSG:
                        c->Request.CDBLen = 16;
                        c->Request.Type.Type = 1; /* It is a MSG not a CMD */
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_NONE;
                        c->Request.Timeout = 0; /* Don't time out */
                        memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
                        c->Request.CDB[0] = cmd;
                        c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
                        /* If bytes 4-7 are zero, it means reset the */
                        /* LunID device */
                        c->Request.CDB[4] = 0x00;
                        c->Request.CDB[5] = 0x00;
                        c->Request.CDB[6] = 0x00;
                        c->Request.CDB[7] = 0x00;
                        break;
                case HPSA_ABORT_MSG:
                        a = buff; /* point to command to be aborted */
                        dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
                                a->Header.Tag.upper, a->Header.Tag.lower,
                                c->Header.Tag.upper, c->Header.Tag.lower);
                        c->Request.CDBLen = 16;
                        c->Request.Type.Type = TYPE_MSG;
                        c->Request.Type.Attribute = ATTR_SIMPLE;
                        c->Request.Type.Direction = XFER_WRITE;
                        c->Request.Timeout = 0; /* Don't time out */
                        c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
                        c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
                        c->Request.CDB[2] = 0x00; /* reserved */
                        c->Request.CDB[3] = 0x00; /* reserved */
                        /* Tag to abort goes in CDB[4]-CDB[11] */
                        c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
                        c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
                        c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
                        c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
                        c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
                        c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
                        c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
                        c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
                        c->Request.CDB[12] = 0x00; /* reserved */
                        c->Request.CDB[13] = 0x00; /* reserved */
                        c->Request.CDB[14] = 0x00; /* reserved */
                        c->Request.CDB[15] = 0x00; /* reserved */
                        break;
                default:
                        dev_warn(&h->pdev->dev, "unknown message type %d\n",
                                cmd);
                        BUG();
                }
        } else {
                dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
                BUG();
        }

        switch (c->Request.Type.Direction) {
        case XFER_READ:
                pci_dir = PCI_DMA_FROMDEVICE;
                break;
        case XFER_WRITE:
                pci_dir = PCI_DMA_TODEVICE;
                break;
        case XFER_NONE:
                pci_dir = PCI_DMA_NONE;
                break;
        default:
                pci_dir = PCI_DMA_BIDIRECTIONAL;
        }
        if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
                return -1;
        return 0;
}
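/*
 * Illustration (not part of the driver): the HPSA_ABORT_MSG case above
 * stores each 32-bit half of the tag into CDB[4..11] least-significant
 * byte first.  A standalone sketch of the packing, with made-up tag
 * values, showing the byte order it produces:
 */
#if 0   /* standalone sketch */
#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint32_t lower = 0xaabbccdd, upper = 0x11223344;
        uint8_t cdb[16] = { 0 };
        int i;

        for (i = 0; i < 4; i++) {
                cdb[4 + i] = (lower >> (8 * i)) & 0xFF;
                cdb[8 + i] = (upper >> (8 * i)) & 0xFF;
        }
        assert(cdb[4] == 0xdd && cdb[7] == 0xaa);       /* little-endian */
        assert(cdb[8] == 0x44 && cdb[11] == 0x11);
        return 0;
}
#endif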
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
        ulong page_base = ((ulong) base) & PAGE_MASK;
        ulong page_offs = ((ulong) base) - page_base;
        void __iomem *page_remapped = ioremap_nocache(page_base,
                page_offs + size);

        return page_remapped ? (page_remapped + page_offs) : NULL;
}
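/*
 * Illustration (not part of the driver): remap_pci_mem() rounds the
 * physical base down to a page boundary, maps from there, and returns
 * the mapping plus the offset back to the caller's address.  The same
 * arithmetic in a standalone sketch, assuming a 4 KiB page size and a
 * made-up base address:
 */
#if 0   /* standalone sketch */
#include <assert.h>

int main(void)
{
        unsigned long page_mask = ~0xfffUL;     /* 4 KiB pages assumed */
        unsigned long base = 0xfe001234UL;
        unsigned long page_base = base & page_mask;     /* 0xfe001000 */
        unsigned long page_offs = base - page_base;     /* 0x234 */

        assert(page_base + page_offs == base);
        return 0;
}
#endif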
/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
        struct CommandList *c;
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        while (!list_empty(&h->reqQ)) {
                c = list_entry(h->reqQ.next, struct CommandList, list);
                /* can't do anything if fifo is full */
                if ((h->access.fifo_full(h))) {
                        dev_warn(&h->pdev->dev, "fifo full\n");
                        break;
                }

                /* Get the first entry from the Request Q */
                removeQ(c);
                h->Qdepth--;

                /* Put job onto the completed Q */
                addQ(&h->cmpQ, c);

                /* Must increment commands_outstanding before unlocking
                 * and submitting to avoid race checking for fifo full
                 * condition.
                 */
                h->commands_outstanding++;
                if (h->commands_outstanding > h->max_outstanding)
                        h->max_outstanding = h->commands_outstanding;

                /* Tell the controller execute command */
                spin_unlock_irqrestore(&h->lock, flags);
                h->access.submit_command(h, c);
                spin_lock_irqsave(&h->lock, flags);
        }
        spin_unlock_irqrestore(&h->lock, flags);
}
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
        return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
        return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
        return (h->access.intr_pending(h) == 0) ||
                (h->interrupts_enabled == 0);
}
static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
        u32 raw_tag)
{
        if (unlikely(tag_index >= h->nr_cmds)) {
                dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
                return 1;
        }
        return 0;
}
static inline void finish_cmd(struct CommandList *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->h->lock, flags);
        removeQ(c);
        spin_unlock_irqrestore(&c->h->lock, flags);
        dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
        if (likely(c->cmd_type == CMD_SCSI))
                complete_scsi_command(c);
        else if (c->cmd_type == CMD_IOCTL_PEND)
                complete(c->waiting);
}
static inline u32 hpsa_tag_contains_index(u32 tag)
{
        return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
        return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return tag & ~HPSA_SIMPLE_ERROR_BITS;
        return tag & ~HPSA_PERF_ERROR_BITS;
}
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
        u32 raw_tag)
{
        u32 tag_index;
        struct CommandList *c;

        tag_index = hpsa_tag_to_index(raw_tag);
        if (!bad_tag(h, tag_index, raw_tag)) {
                c = h->cmd_pool + tag_index;
                finish_cmd(c);
        }
}
/* process completion of a non-indexed command */
static inline void process_nonindexed_cmd(struct ctlr_info *h,
        u32 raw_tag)
{
        u32 tag;
        struct CommandList *c = NULL;
        unsigned long flags;

        tag = hpsa_tag_discard_error_bits(h, raw_tag);
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(c, &h->cmpQ, list) {
                if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
                        spin_unlock_irqrestore(&h->lock, flags);
                        finish_cmd(c);
                        return;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        bad_tag(h, h->nr_cmds + 1, raw_tag);
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
        if (likely(!reset_devices))
                return 0;

        if (likely(h->interrupts_enabled))
                return 0;

        dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
                "(known firmware bug.)  Ignoring.\n");

        return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h-q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
        return container_of((queue - *queue), struct ctlr_info, q[0]);
}
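/*
 * Illustration (not part of the driver): h->q[] is initialized so that
 * h->q[x] == x, so a handler handed &h->q[x] can read *queue to learn
 * which reply queue fired, then walk back (queue - *queue) to q[0] and
 * apply container_of().  A standalone sketch of the same pointer
 * arithmetic using offsetof(); the struct layout here is invented for
 * the example:
 */
#if 0   /* standalone sketch */
#include <stddef.h>
#include <assert.h>

struct hba {
        int dummy;
        unsigned char q[4];
};

int main(void)
{
        struct hba h;
        unsigned char i, *queue;

        for (i = 0; i < 4; i++)
                h.q[i] = i;             /* q[x] == x, as in the driver */

        queue = &h.q[2];                /* what the handler receives */
        assert(*queue == 2);            /* recover the queue number */
        /* walk back to q[0], then to the containing struct */
        assert((struct hba *)((char *)(queue - *queue) -
                        offsetof(struct hba, q[0])) == &h);
        return 0;
}
#endif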
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
        struct ctlr_info *h = queue_to_hba(queue);
        u8 q = *(u8 *) queue;
        u32 raw_tag;

        if (ignore_bogus_interrupt(h))
                return IRQ_NONE;

        if (interrupt_not_for_us(h))
                return IRQ_NONE;
        h->last_intr_timestamp = get_jiffies_64();
        while (interrupt_pending(h)) {
                raw_tag = get_next_completion(h, q);
                while (raw_tag != FIFO_EMPTY)
                        raw_tag = next_command(h, q);
        }
        return IRQ_HANDLED;
}
static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
        struct ctlr_info *h = queue_to_hba(queue);
        u32 raw_tag;
        u8 q = *(u8 *) queue;

        if (ignore_bogus_interrupt(h))
                return IRQ_NONE;

        h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h, q);
        while (raw_tag != FIFO_EMPTY)
                raw_tag = next_command(h, q);
        return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
        struct ctlr_info *h = queue_to_hba((u8 *) queue);
        u32 raw_tag;
        u8 q = *(u8 *) queue;

        if (interrupt_not_for_us(h))
                return IRQ_NONE;
        h->last_intr_timestamp = get_jiffies_64();
        while (interrupt_pending(h)) {
                raw_tag = get_next_completion(h, q);
                while (raw_tag != FIFO_EMPTY) {
                        if (likely(hpsa_tag_contains_index(raw_tag)))
                                process_indexed_cmd(h, raw_tag);
                        else
                                process_nonindexed_cmd(h, raw_tag);
                        raw_tag = next_command(h, q);
                }
        }
        return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
        struct ctlr_info *h = queue_to_hba(queue);
        u32 raw_tag;
        u8 q = *(u8 *) queue;

        h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h, q);
        while (raw_tag != FIFO_EMPTY) {
                if (likely(hpsa_tag_contains_index(raw_tag)))
                        process_indexed_cmd(h, raw_tag);
                else
                        process_nonindexed_cmd(h, raw_tag);
                raw_tag = next_command(h, q);
        }
        return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
        unsigned char type)
{
        struct Command {
                struct CommandListHeader CommandHeader;
                struct RequestBlock Request;
                struct ErrDescriptor ErrorDescriptor;
        };
        struct Command *cmd;
        static const size_t cmd_sz = sizeof(*cmd) +
                sizeof(cmd->ErrorDescriptor);
        dma_addr_t paddr64;
        uint32_t paddr32, tag;
        void __iomem *vaddr;
        int i, err;

        vaddr = pci_ioremap_bar(pdev, 0);
        if (vaddr == NULL)
                return -ENOMEM;

        /* The Inbound Post Queue only accepts 32-bit physical addresses for the
         * CCISS commands, so they must be allocated from the lower 4GiB of
         * memory.
         */
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                iounmap(vaddr);
                return -ENOMEM;
        }

        cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
        if (cmd == NULL) {
                iounmap(vaddr);
                return -ENOMEM;
        }

        /* This must fit, because of the 32-bit consistent DMA mask.  Also,
         * although there's no guarantee, we assume that the address is at
         * least 4-byte aligned (most likely, it's page-aligned).
         */
        paddr32 = paddr64;

        cmd->CommandHeader.ReplyQueue = 0;
        cmd->CommandHeader.SGList = 0;
        cmd->CommandHeader.SGTotal = 0;
        cmd->CommandHeader.Tag.lower = paddr32;
        cmd->CommandHeader.Tag.upper = 0;
        memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

        cmd->Request.CDBLen = 16;
        cmd->Request.Type.Type = TYPE_MSG;
        cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
        cmd->Request.Type.Direction = XFER_NONE;
        cmd->Request.Timeout = 0; /* Don't time out */
        cmd->Request.CDB[0] = opcode;
        cmd->Request.CDB[1] = type;
        memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
        cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
        cmd->ErrorDescriptor.Addr.upper = 0;
        cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

        writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

        for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
                tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
                if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
                        break;
                msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
        }

        iounmap(vaddr);

        /* we leak the DMA buffer here ... no choice since the controller could
         * still complete the command.
         */
        if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
                dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
                        opcode, type);
                return -ETIMEDOUT;
        }

        pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

        if (tag & HPSA_ERROR_BIT) {
                dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
                        opcode, type);
                return -EIO;
        }

        dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
                opcode, type);
        return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
        void * __iomem vaddr, u32 use_doorbell)
{
        u16 pmcsr;
        int pos;

        if (use_doorbell) {
                /* For everything after the P600, the PCI power state method
                 * of resetting the controller doesn't work, so we have this
                 * other way using the doorbell register.
                 */
                dev_info(&pdev->dev, "using doorbell to reset controller\n");
                writel(use_doorbell, vaddr + SA5_DOORBELL);
        } else { /* Try to do it the PCI power state way */

                /* Quoting from the Open CISS Specification: "The Power
                 * Management Control/Status Register (CSR) controls the power
                 * state of the device.  The normal operating state is D0,
                 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
                 * the controller, place the interface device in D3 then to D0,
                 * this causes a secondary PCI reset which will reset the
                 * controller." */

                pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
                if (pos == 0) {
                        dev_err(&pdev->dev,
                                "hpsa_reset_controller: "
                                "PCI PM not supported\n");
                        return -ENODEV;
                }
                dev_info(&pdev->dev, "using PCI PM to reset controller\n");
                /* enter the D3hot power management state */
                pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= PCI_D3hot;
                pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

                msleep(500);

                /* enter the D0 power management state */
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= PCI_D0;
                pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

                /*
                 * The P600 requires a small delay when changing states.
                 * Otherwise we may think the board did not reset and we bail.
                 * This for kdump only and is particular to the P600.
                 */
                msleep(500);
        }
        return 0;
}
static void init_driver_version(char *driver_version, int len)
{
        memset(driver_version, 0, len);
        strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
        char *driver_version;
        int i, size = sizeof(cfgtable->driver_version);

        driver_version = kmalloc(size, GFP_KERNEL);
        if (!driver_version)
                return -ENOMEM;

        init_driver_version(driver_version, size);
        for (i = 0; i < size; i++)
                writeb(driver_version[i], &cfgtable->driver_version[i]);
        kfree(driver_version);
        return 0;
}
static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
        unsigned char *driver_ver)
{
        int i;

        for (i = 0; i < sizeof(cfgtable->driver_version); i++)
                driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
        char *driver_ver, *old_driver_ver;
        int rc, size = sizeof(cfgtable->driver_version);

        old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
        if (!old_driver_ver)
                return -ENOMEM;
        driver_ver = old_driver_ver + size;

        /* After a reset, the 32 bytes of "driver version" in the cfgtable
         * should have been changed, otherwise we know the reset failed.
         */
        init_driver_version(old_driver_ver, size);
        read_driver_ver_from_cfgtable(cfgtable, driver_ver);
        rc = !memcmp(driver_ver, old_driver_ver, size);
        kfree(old_driver_ver);
        return rc;
}
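/*
 * Illustration (not part of the driver): controller_reset_failed() treats
 * an *unchanged* driver_version field as proof the reset never happened,
 * so rc is the negated memcmp() result.  A standalone sketch of the same
 * check, with made-up buffer contents:
 */
#if 0   /* standalone sketch */
#include <string.h>
#include <assert.h>

int main(void)
{
        char written[32]   = "hpsa 3.4.0-1";  /* what we wrote pre-reset */
        char after_ok[32]  = "";              /* cleared by a real reset */
        char after_bad[32] = "hpsa 3.4.0-1";  /* survived: reset failed */

        int rc_ok = !memcmp(after_ok, written, 32);   /* 0: reset worked */
        int rc_bad = !memcmp(after_bad, written, 32); /* 1: reset failed */

        assert(rc_ok == 0 && rc_bad == 1);
        return 0;
}
#endif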
/* This does a hard reset of the controller using PCI power management
 * states or the using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
        u64 cfg_offset;
        u32 cfg_base_addr;
        u64 cfg_base_addr_index;
        void __iomem *vaddr;
        unsigned long paddr;
        u32 misc_fw_support;
        int rc;
        struct CfgTable __iomem *cfgtable;
        u32 use_doorbell;
        u32 board_id;
        u16 command_register;

        /* For controllers as old as the P600, this is very nearly
         * the same thing as
         *
         * pci_save_state(pci_dev);
         * pci_set_power_state(pci_dev, PCI_D3hot);
         * pci_set_power_state(pci_dev, PCI_D0);
         * pci_restore_state(pci_dev);
         *
         * For controllers newer than the P600, the pci power state
         * method of resetting doesn't work so we have another way
         * using the doorbell register.
         */

        rc = hpsa_lookup_board_id(pdev, &board_id);
        if (rc < 0 || !ctlr_is_resettable(board_id)) {
                dev_warn(&pdev->dev, "Not resetting device.\n");
                return -ENODEV;
        }

        /* if controller is soft- but not hard resettable... */
        if (!ctlr_is_hard_resettable(board_id))
                return -ENOTSUPP; /* try soft reset later. */

        /* Save the PCI command register */
        pci_read_config_word(pdev, 4, &command_register);
        /* Turn the board off.  This is so that later pci_restore_state()
         * won't turn the board on before the rest of config space is ready.
         */
        pci_disable_device(pdev);
        pci_save_state(pdev);

        /* find the first memory BAR, so we can find the cfg table */
        rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
        if (rc)
                return rc;
        vaddr = remap_pci_mem(paddr, 0x250);
        if (!vaddr)
                return -ENOMEM;

        /* find cfgtable in order to check if reset via doorbell is supported */
        rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
                &cfg_base_addr_index, &cfg_offset);
        if (rc)
                goto unmap_vaddr;
        cfgtable = remap_pci_mem(pci_resource_start(pdev,
                cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
        if (!cfgtable) {
                rc = -ENOMEM;
                goto unmap_vaddr;
        }
        rc = write_driver_ver_to_cfgtable(cfgtable);
        if (rc)
                goto unmap_vaddr;

        /* If reset via doorbell register is supported, use that.
         * There are two such methods.  Favor the newest method.
         */
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
        if (use_doorbell) {
                use_doorbell = DOORBELL_CTLR_RESET2;
        } else {
                use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
                if (use_doorbell) {
                        dev_warn(&pdev->dev, "Soft reset not supported. "
                                "Firmware update is required.\n");
                        rc = -ENOTSUPP; /* try soft reset */
                        goto unmap_cfgtable;
                }
        }

        rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;

        pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc) {
                dev_warn(&pdev->dev, "failed to enable device.\n");
                goto unmap_cfgtable;
        }
        pci_write_config_word(pdev, 4, command_register);

        /* Some devices (notably the HP Smart Array 5i Controller)
           need a little pause here */
        msleep(HPSA_POST_RESET_PAUSE_MSECS);

        /* Wait for board to become not ready, then ready. */
        dev_info(&pdev->dev, "Waiting for board to reset.\n");
        rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
        if (rc) {
                dev_warn(&pdev->dev,
                        "failed waiting for board to reset."
                        " Will try soft reset.\n");
                rc = -ENOTSUPP; /* Not expected, but try soft reset later */
                goto unmap_cfgtable;
        }
        rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
        if (rc) {
                dev_warn(&pdev->dev,
                        "failed waiting for board to become ready "
                        "after hard reset\n");
                goto unmap_cfgtable;
        }

        rc = controller_reset_failed(vaddr);
        if (rc < 0)
                goto unmap_cfgtable;
        if (rc) {
                dev_warn(&pdev->dev, "Unable to successfully reset "
                        "controller. Will try soft reset.\n");
                rc = -ENOTSUPP;
        } else {
                dev_info(&pdev->dev, "board ready after hard reset.\n");
        }

unmap_cfgtable:
        iounmap(cfgtable);

unmap_vaddr:
        iounmap(vaddr);
        return rc;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
        int i;
        char temp_name[17];

        dev_info(dev, "Controller Configuration information\n");
        dev_info(dev, "------------------------------------\n");
        for (i = 0; i < 4; i++)
                temp_name[i] = readb(&(tb->Signature[i]));
        temp_name[4] = '\0';
        dev_info(dev, "   Signature = %s\n", temp_name);
        dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
        dev_info(dev, "   Transport methods supported = 0x%x\n",
                readl(&(tb->TransportSupport)));
        dev_info(dev, "   Transport methods active = 0x%x\n",
                readl(&(tb->TransportActive)));
        dev_info(dev, "   Requested transport Method = 0x%x\n",
                readl(&(tb->HostWrite.TransportRequest)));
        dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
                readl(&(tb->HostWrite.CoalIntDelay)));
        dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
                readl(&(tb->HostWrite.CoalIntCount)));
        dev_info(dev, "   Max outstanding commands = %d\n",
                readl(&(tb->CmdsOutMax)));
        dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
        for (i = 0; i < 16; i++)
                temp_name[i] = readb(&(tb->ServerName[i]));
        temp_name[16] = '\0';
        dev_info(dev, "   Server Name = %s\n", temp_name);
        dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
                readl(&(tb->HeartBeat)));
#endif /* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;	/* 64 bit BARs use two dwords */
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
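/* Worked example of the walk above (illustrative values, not from the
 * spec): if resource 0 is a 32-bit memory BAR, offset becomes 4 after it
 * is examined, so a config-space address of PCI_BASE_ADDRESS_1 (0x14)
 * resolves to resource index 1.  A 64-bit BAR at resource 0 would instead
 * consume 8 bytes of config space, pushing the next BAR's address to 0x18.
 */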
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */

static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
						MAX_REPLY_QUEUES);
		if (!err) {
			for (i = 0; i < MAX_REPLY_QUEUES; i++)
				h->intr[i] = hpsa_msix_entries[i].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}
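/* Note: the fallback ladder above is MSI-X -> MSI -> legacy INTx.  Any
 * failure drops through to default_int_mode, which records the plain
 * pci_dev irq, so the rest of the driver never needs to know how the
 * vector was obtained.
 */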
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
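/* Example of the composition above: a board with subsystem vendor
 * 0x103C and subsystem device 0x3241 yields *board_id = 0x3241103C,
 * which is the form the products[] table entries are keyed by.
 */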
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	*memory_bar = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save dma'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}
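/* The handshake being completed above: a caller writes the requested
 * transport mode into cfgtable->HostWrite.TransportRequest, rings the
 * doorbell with CFGTBL_ChangeReq, and the controller acknowledges by
 * clearing that bit in the doorbell register, which this poll detects.
 */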
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
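/* Sizing note for cmd_pool_bits above: DIV_ROUND_UP(h->nr_cmds,
 * BITS_PER_LONG) longs provide one allocation bit per command.  As a
 * hypothetical example, nr_cmds = 1020 on a 64-bit kernel yields 16
 * unsigned longs (1024 bits), of which the last 4 bits go unused.
 */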
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < MAX_REPLY_QUEUES; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}
static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		free_irq(h->intr[i], &h->q[i]);
}
static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}
static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}
/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}
static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
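/* Summary of the decision above: the controller is presumed healthy if
 * it has interrupted us, or already been sampled, within the last
 * heartbeat_sample_interval; it is declared locked up only when the
 * firmware-maintained HeartBeat counter reads the same value on two
 * consecutive samples.
 */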
static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}
static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, HPSA);
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}
static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlr's to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		kthread_stop(hpsa_lockup_detector);
		spin_lock_irqsave(&lockup_detector_lock, flags);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
out:
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off  and send the flush cache command
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
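/* Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...}, a
 * command with i = 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7
 * sixteen-byte blocks; the first bucket >= 7 is bucket[2] = 8, so
 * bucket_map[3] = 2.
 */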
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 * 12 = 8 s/g entry or 32k
	 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(CFGTBL_Trans_Performant | use_short_tags |
		CFGTBL_Trans_enable_directed_msix,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}
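/* Layout implied by the RepQAddr programming above: the reply pool is
 * carved into nreply_queues contiguous rings of max_commands eight-byte
 * entries each, so queue i begins at bus address
 * reply_pool_dhandle + (max_commands * sizeof(u64) * i).
 */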
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);