/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	}
};
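/*
 * Each ipr_chip[] entry below ties a PCI vendor/device ID pair to the
 * interrupt mode the adapter is driven with (legacy LSI vs. MSI), its
 * SIS interface width (32 vs. 64 bit), how its registers are reached
 * (PCI config space vs. MMIO), and the register offset table above.
 */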
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
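/*
 * Example: loading the driver with a capped bus speed and more verbose
 * logging:
 *
 *	modprobe ipr max_speed=1 log_level=2
 *
 * Parameters registered with S_IRUGO | S_IWUSR permissions (fastfail,
 * debug) can also be read and changed at runtime through
 * /sys/module/ipr/parameters/.
 */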
/* A constant array of IOASCs/URCs/Error Messages */
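/*
 * Each entry: the IOASC value, a flag selecting whether the raw IOASA
 * is logged when a command fails with that IOASC, the log level at
 * which the associated error HCAM is reported, and the message text.
 */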
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
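/*
 * Each ipr_ses_table entry above pairs an enclosure product ID with a
 * per-byte compare vector used when matching inquiry data, and the
 * maximum SCSI bus speed (in MB/s) to allow for that enclosure.
 */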
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
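/* Note: callers are expected to hold the host lock, and the free queue
 * is assumed to be non-empty -- there is deliberately no empty check
 * before the list_entry() above.
 */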
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
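/* The concluding read of the sense interrupt register forces the
 * posted MMIO writes above to complete before the caller proceeds.
 */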
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}
}
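/* Each failed op is completed with IPR_IOASC_IOA_WAS_RESET and routed
 * through the SCSI or SATA eh done handler above so the mid-layer can
 * clean up the request once the adapter is reset.
 */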
/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
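/* On SIS64 the IOARCB size is encoded in the low-order bits of the
 * address written to IOARRIN: bit 0 selects the 256 byte format, and
 * additionally setting bit 2 selects a 512 byte IOARCB when more than
 * 128 bytes of IOADL entries are needed.
 */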
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
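/* The host lock is dropped across the wait so the interrupt handler
 * can complete the command; callers must therefore hold the host lock
 * (and be in a context that can sleep) when calling this routine.
 */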
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
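/* Example output for a resource path of { 0x00, 0x0A, 0x03, 0xff, ... }
 * would be "00-0A-03": two hex digits per element, dash separated,
 * stopping at the first 0xff terminator.
 */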
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			ipr_format_res_path(dev_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(fabric->res_path, buffer,
							 sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(cfg->res_path, buffer,
							 sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(array_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
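/*
 * Usage sketch (hypothetical IOASC value): the mask is applied internally,
 * so a caller simply does
 *
 *	index = ipr_get_error(0x04448500);
 *
 * and either gets the matching table index or 0, the catch-all "unknown
 * error" entry that ipr_handle_log_data() below relies on.
 */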
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
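/*
 * Note: SUCCESS and FAILED above are the SCSI error-handling return codes;
 * ipr_reset_reload() is invoked from eh callbacks that must report one of
 * them. The caller is expected to hold the host lock, which is dropped and
 * re-acquired around the wait for the reset/reload to finish.
 */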
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
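/*
 * The busy-wait above doubles "delay" on every poll (1, 2, 4, ... usecs up
 * to max_delay), switching from udelay() to mdelay() once a single step
 * would exceed MAX_UDELAY_MS, so the total time spent spinning stays
 * bounded by roughly 2 * max_delay microseconds.
 */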
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
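/*
 * Summary of the (non-sis64) LDUMP handshake driven above:
 *
 *   1. Raise RESET_ALERT + IO_DEBUG_ALERT to put the IOA in LDUMP state.
 *   2. Wait for the IO debug ack, then clear it to interlock.
 *   3. Write the start address to the mailbox and clear RESET_ALERT.
 *   4. Per word: wait for the ack, read the mailbox, ack all but the last.
 *   5. Raise RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then poll
 *      until the IOA drops RESET_ALERT to signal LDUMP exit.
 */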
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}
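/*
 * ipr_release_dump() is only ever invoked through the dump kref, e.g.
 *
 *	kref_put(&dump->kref, ipr_release_dump);
 *
 * so the dump pages and the ioa_data array are freed exactly once, when
 * the last holder (worker thread or sysfs reader) drops its reference.
 */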
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
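/*
 * Locking note: scsi_add_device() and scsi_remove_device() can sleep, so
 * the worker drops host_lock around each mid-layer call and then re-walks
 * the resource queue (the "restart" label / did_work loop), since the
 * queue may have changed while the lock was released.
 */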
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
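/*
 * Example usage from userspace (path assumes the standard scsi_host sysfs
 * layout; hostN is this adapter's SCSI host number):
 *
 *	echo 4 > /sys/class/scsi_host/hostN/log_level
 *
 * Higher values allow ipr_handle_log_data() to log progressively more
 * error detail.
 */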
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
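/*
 * Sizing example (illustrative, assuming IPR_MAX_SGLIST is 64 and 4K
 * pages): a 1 MB microcode image gives sg_size = 1048576/63 = 16644 bytes,
 * get_order() rounds that up to order 3 (32 KB per element), so
 * bsize_elem = 32768 and exactly num_elem = 32 scatter/gather entries
 * are allocated.
 */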
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
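/*
 * The download itself is not performed here: publishing the mapped sglist
 * via ioa_cfg->ucode_sglist and initiating a normal shutdown/reset lets
 * the reset job stream the image to the IOA while the adapter is brought
 * back up; this function only waits for that reset/reload to complete and
 * then clears the pointer.
 */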
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
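/*
 * Example usage (path assumes the standard scsi_host sysfs layout; the
 * image must be placed where request_firmware() can find it, typically
 * /lib/firmware):
 *
 *	echo ucode-image-file > /sys/class/scsi_host/hostN/update_fw
 */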
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
3994 * ipr_free_dump - Free adapter dump memory
3995 * @ioa_cfg: ioa config struct
3998 * 0 on success / other on failure
4000 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4002 struct ipr_dump
*dump
;
4003 unsigned long lock_flags
= 0;
4007 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4008 dump
= ioa_cfg
->dump
;
4010 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4014 ioa_cfg
->dump
= NULL
;
4015 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4017 kref_put(&dump
->kref
, ipr_release_dump
);
4024 * ipr_write_dump - Setup dump state of adapter
4025 * @filp: open sysfs file
4026 * @kobj: kobject struct
4027 * @bin_attr: bin_attribute struct
4030 * @count: buffer size
4033 * number of bytes printed to buffer
4035 static ssize_t
ipr_write_dump(struct file
*filp
, struct kobject
*kobj
,
4036 struct bin_attribute
*bin_attr
,
4037 char *buf
, loff_t off
, size_t count
)
4039 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4040 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4041 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4044 if (!capable(CAP_SYS_ADMIN
))
4048 rc
= ipr_alloc_dump(ioa_cfg
);
4049 else if (buf
[0] == '0')
4050 rc
= ipr_free_dump(ioa_cfg
);
4060 static struct bin_attribute ipr_dump_attr
= {
4063 .mode
= S_IRUSR
| S_IWUSR
,
4066 .read
= ipr_read_dump
,
4067 .write
= ipr_write_dump
4070 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
) { return 0; };
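/*
 * Illustrative sketch (not part of the driver): ipr_write_dump() above
 * arms ('1') or frees ('0') the adapter dump, and ipr_read_dump() streams
 * it back through the "dump" binary attribute. A user-space reader might
 * look like the following; the sysfs path is an assumption based on the
 * bin_attribute name.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int fetch_dump(const char *path, int out_fd)
{
	char page[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);	/* e.g. /sys/class/scsi_host/host0/dump */

	if (fd < 0)
		return -1;
	while ((n = read(fd, page, sizeof(page))) > 0)
		write(out_fd, page, n);	/* copy the dump to a file */
	close(fd);
	return 0;
}
#endif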
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
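/*
 * Illustrative sketch (not part of the driver): the midlayer invokes
 * ipr_change_queue_depth() when user space writes the sdev queue_depth
 * attribute, e.g. via the (assumed) path below. GATA devices are capped
 * at IPR_MAX_CMD_PER_ATA_LUN regardless of the value written.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* Path is an example; substitute the real H:C:T:L of the device. */
	FILE *f = fopen("/sys/bus/scsi/devices/0:0:1:0/queue_depth", "w");

	if (!f)
		return 1;
	fprintf(f, "16\n");
	fclose(f);
	return 0;
}
#endif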
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:		scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       ipr_format_res_path(res->res_path, buffer,
						   sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
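/*
 * Worked example for the mapping above (a sketch, not driver code): with
 * 128 heads and 32 sectors/track, one cylinder spans 128 * 32 = 4096
 * sectors, i.e. 2 MiB at 512-byte sectors, so cylinder-aligned partitions
 * are always 4k aligned. For a 71,000,000-sector disk the reported
 * geometry would be 128/32/17333.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 71000000ULL;	/* example disk size in sectors */
	unsigned long long cylinders = capacity / (128 * 32);

	printf("H/S/C = 128/32/%llu\n", cylinders);	/* prints 17333 */
	return 0;
}
#endif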
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	if (!ioa_cfg->in_reset_reload) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	reset deadline
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	u32 int_reg = 0;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd && !ioa_cfg->clear_isr)
			break;

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return IRQ_HANDLED;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
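/*
 * Illustrative sketch (not driver code) of the HRRQ consumption protocol
 * implemented above: each 32-bit response slot carries a toggle bit that
 * flips every time the queue wraps, so a slot is "new" only while its
 * toggle bit matches the driver's expected value.
 */
#if 0
while ((be32_to_cpu(*hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == toggle_bit) {
	consume(*hrrq_curr);		/* handle one response */
	if (hrrq_curr < hrrq_end)
		hrrq_curr++;		/* advance within the ring */
	else {
		hrrq_curr = hrrq_start;	/* wrap back to the start ... */
		toggle_bit ^= 1u;	/* ... and expect the flipped bit */
	}
}
#endif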
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
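/*
 * Illustrative sketch (not driver code): the 64-bit IOADL built above for
 * a two-segment write would contain, per segment, a flags word, a length
 * and a 64-bit DMA address, with IPR_IOADL_FLAGS_LAST OR'ed into the
 * final descriptor:
 *
 *	ioadl64[0] = { WRITE,        len0, addr0 }
 *	ioadl64[1] = { WRITE | LAST, len1, addr1 }
 */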
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}
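/*
 * Illustrative summary (a sketch, not new driver logic) of the SPI tag
 * message to IOA task-attribute mapping performed above. The MSG_*
 * constants come from <scsi/scsi.h>; the IPR_FLAGS_LO_* values are the
 * task-attribute bits carried in the IOARCB command packet:
 *
 *	MSG_SIMPLE_TAG  -> IPR_FLAGS_LO_SIMPLE_TASK
 *	MSG_HEAD_TAG    -> IPR_FLAGS_LO_HEAD_OF_Q_TASK
 *	MSG_ORDERED_TAG -> IPR_FLAGS_LO_ORDERED_TASK
 *	(no tag)        -> IPR_FLAGS_LO_UNTAGGED_TASK
 */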
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
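/*
 * Illustrative layout (a sketch, not driver code) of the fixed-format
 * sense buffer built above for a media error on LBA 0x1234 with the
 * Valid bit set:
 *
 *	sense_buf[0]   = 0xF0                 (0x70 | 0x80 Valid bit)
 *	sense_buf[2]   = sense key            (e.g. 0x03 MEDIUM ERROR)
 *	sense_buf[3-6] = 0x00 0x00 0x12 0x34  (failing LBA, big endian)
 *	sense_buf[7]   = 6                    (additional length)
 *	sense_buf[12]  = ASC, sense_buf[13] = ASCQ
 */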
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(ipr_cmd->scsi_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
				void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0)) {
		if (ioa_cfg->sis64)
			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		else
			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
	}

	if (unlikely(rc != 0)) {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	ipr_send_command(ipr_cmd);
	return 0;
}

static DEF_SCSI_QCMD(ipr_queuecommand)
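/*
 * Note (a sketch of intent, not new driver logic): DEF_SCSI_QCMD wraps
 * ipr_queuecommand_lck() with the host lock held, generating the
 * unlocked ipr_queuecommand() entry point referenced by the host
 * template below.
 */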
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_info - Get information about the card/driver
 * @scsi_host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
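/*
 * Illustrative sketch (not part of this file's logic): a host template
 * like the one above is typically bound to a Scsi_Host elsewhere in the
 * driver's probe path, roughly as follows; names and error handling here
 * are assumptions for illustration only.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!host)
		return -ENOMEM;
	/* ... set up the ioa_cfg in host->hostdata, request IRQs ... */
	return scsi_add_host(host, &pdev->dev);
}
#endif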
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:		ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:		source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
6050 * ipr_sata_done - done function for SATA commands
6051 * @ipr_cmd: ipr command struct
6053 * This function is invoked by the interrupt handler for
6054 * ops generated by the SCSI mid-layer to SATA devices
6059 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6061 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6062 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6063 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6064 struct ipr_resource_entry
*res
= sata_port
->res
;
6065 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6067 if (ipr_cmd
->ioa_cfg
->sis64
)
6068 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6069 sizeof(struct ipr_ioasa_gata
));
6071 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6072 sizeof(struct ipr_ioasa_gata
));
6073 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6075 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6076 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6078 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6079 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6081 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6082 list_add_tail(&ipr_cmd
->queue
, &ioa_cfg
->free_q
);
6083 ata_qc_complete(qc
);

/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 * Return value:
 * 	none
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
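
/*
 * The pattern above -- walk the scatter/gather list, remember the most
 * recently written descriptor, and OR the LAST flag into it after the
 * loop -- is the general shape of IOADL construction throughout this
 * driver. A minimal sketch of the idiom follows; the example_desc type
 * and helper are hypothetical and not part of the driver (guarded out
 * so it is never built):
 */
#if 0
struct example_desc {
	__be32 flags;
	__be32 data_len;
	__be64 address;
};

static void example_build_desc_list(struct scatterlist *sgl, int n_elem,
				    u32 flags, struct example_desc *desc)
{
	struct example_desc *last_desc = NULL;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(sgl, sg, n_elem, si) {
		desc->flags = cpu_to_be32(flags);
		desc->data_len = cpu_to_be32(sg_dma_len(sg));
		desc->address = cpu_to_be64(sg_dma_address(sg));
		last_desc = desc++;
	}

	/* Only the final descriptor carries the LAST marker. */
	if (last_desc)
		last_desc->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
#endif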

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 * Return value:
 * 	none
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 * 	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);

	return 0;
}

/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 * 	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;
	tf->ctl = g->alt_status;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
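
/*
 * For context: libata never touches this adapter's registers directly;
 * the ops table above routes every queued command through ipr_qc_issue(),
 * which wraps it in an IOARCB. Ports are created against sata_port_info
 * via the SAS/SATA glue layer. A hedged sketch of that allocation (the
 * shost, ioa_cfg, and sata_port variables are assumed to be in scope;
 * the driver's real target_alloc handler does additional checking):
 */
#if 0
	struct ata_port *ap;

	ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
	if (ap)
		ap->private_data = sata_port;
#endif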

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (__is_processor(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
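
/*
 * Illustrative only: a probe-time caller would gate on the check above
 * roughly as follows (a sketch, not the driver's exact probe code; the
 * error message and return value are assumptions):
 */
#if 0
	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
		return -ENODEV;
	}
#endif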

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}

/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
				 offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
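
/*
 * Note on the resume pattern above: a job step can only have one command
 * in flight, so ipr_cmd->u.res acts as a cursor and
 * list_for_each_entry_continue() resumes the walk on re-entry. A minimal
 * sketch of the idiom with hypothetical example_ctx/example_item types
 * (illustrative only, not driver code):
 */
#if 0
static int example_job_step(struct example_ctx *ctx)
{
	struct example_item *item = ctx->cursor;

	list_for_each_entry_continue(item, &ctx->work_list, list) {
		ctx->cursor = item;		/* remember where to resume */
		example_issue_one(ctx, item);	/* one async op at a time */
		return EXAMPLE_JOB_RETURN;	/* wait for its completion */
	}
	return EXAMPLE_JOB_CONTINUE;		/* list exhausted */
}
#endif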

/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}

/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	none
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}

/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}

/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}

/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}

/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	flags to send with the inquiry
 * @page:	page to inquiry
 * @dma_addr:	DMA address
 * @xfer_len:	transfer data length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	if (ioa_cfg->sis64)
		ioarcb->cmd_pkt.cdb[1] = 0x1;
	ioarcb->cmd_pkt.cdb[2] =
		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u64) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	if (ioa_cfg->sis64) {
		ioarcb->cmd_pkt.cdb[10] =
			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
		ioarcb->cmd_pkt.cdb[11] =
			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
		ioarcb->cmd_pkt.cdb[12] =
			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
		ioarcb->cmd_pkt.cdb[13] =
			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
	}

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
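
/*
 * The CDB packing above scatters a (potentially 64-bit) DMA address into
 * individual big-endian CDB bytes, most significant byte first. The same
 * effect can be written as a loop; a hedged sketch with a hypothetical
 * helper that is not used by the driver (guarded out):
 */
#if 0
static void example_pack_be(u8 *dst, u64 val, int nbytes)
{
	int i;

	/* Most significant byte lands in dst[0], as the IOA expects. */
	for (i = 0; i < nbytes; i++)
		dst[i] = (val >> (8 * (nbytes - 1 - i))) & 0xff;
}

/* e.g. bytes 2..5 of the Identify Host RRQ CDB would become:
 *	example_pack_be(&ioarcb->cmd_pkt.cdb[2], ioa_cfg->host_rrq_dma, 4);
 */
#endif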

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
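
/*
 * Usage note: callers arm this timer either with a real delay, to poll a
 * condition later, or with a timeout of 0, to bounce back into the reset
 * job from timer context instead of recursing. For instance, the unit
 * check path later in this file does:
 *
 *	ipr_cmd->job_step = ipr_reset_alert;
 *	ipr_reset_start_timer(ipr_cmd, 0);
 */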

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
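
/*
 * For context: the pointers initialized above implement a circular host
 * RRQ. A hedged sketch of how a consumer would advance through it,
 * flipping the toggle bit on wrap so stale entries are never mistaken
 * for new completions (the driver's interrupt handler follows this
 * general shape; guarded out, illustrative only):
 */
#if 0
	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       ioa_cfg->toggle_bit) {
		/* ... process the completed command ... */

		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
			ioa_cfg->hrrq_curr++;
		} else {
			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
			ioa_cfg->toggle_bit ^= 1u;
		}
	}
#endif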

/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ioa_cfg->sdt_state = READ_DUMP;
			ioa_cfg->dump_timeout = 0;
			if (ioa_cfg->sis64)
				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
			else
				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}

/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
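
/*
 * The router above drives a simple state machine: each job step either
 * returns IPR_RC_JOB_CONTINUE (run the next step, already stored in
 * ipr_cmd->job_step, immediately) or IPR_RC_JOB_RETURN (an async request
 * or timer was started; the completion path calls back into
 * ipr_reset_ioa_job). A minimal sketch of a conforming step follows;
 * example_nothing_to_send() is hypothetical (guarded out):
 */
#if 0
static int example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_alert;	/* who runs next */

	if (example_nothing_to_send(ipr_cmd->ioa_cfg))
		return IPR_RC_JOB_CONTINUE;	/* run next step now */

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_INTERNAL_TIMEOUT);
	return IPR_RC_JOB_RETURN;		/* resume on completion */
}
#endif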

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
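
/*
 * Callers that must block until the reset finishes sleep on
 * reset_wait_q, dropping the host lock around the wait. A hedged sketch
 * of the caller-side pattern (the error-handler paths in this driver do
 * essentially this; guarded out, illustrative only):
 */
#if 0
	spin_lock_irq(ioa_cfg->host->host_lock);
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
#endif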

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ioa_cfg->allow_cmds = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
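
/*
 * EEH recovery flow (editor's summary): error_detected(frozen) parks the
 * driver via ipr_reset_freeze(); slot_reset() then restarts the reset
 * job at ipr_reset_restore_cfg_space (or a full warm reset when the chip
 * needs one); a permanent failure instead drives an offline bringdown.
 */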

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	free_irq(pdev->irq, ioa_cfg);
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
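
/*
 * Editor's note: each struct ipr_cmnd lives in a single pci_pool block,
 * so the IOARCB can reference its own IOADL chain, IOASA, and sense
 * buffer by fixed offsetof() displacements from one DMA base address.
 * host_response_handle is the block index shifted left by two; the low
 * two bits of an HRRQ word carry flag bits (presumably the toggle/valid
 * indicators), letting the ISR map a response back to ipr_cmnd_list[i].
 */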

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0)
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}

	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t * __devinit
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	return rc;
}
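
/*
 * How the MSI test works: with all other interrupt sources masked, the
 * driver arms msi_wait_q, writes IPR_PCII_IO_DEBUG_ACKNOWLEDGE to make
 * the adapter raise a test interrupt, and waits up to one second (HZ
 * jiffies) for ipr_test_intr() to set msi_received. If the flag never
 * appears, the MSI vector is not actually being delivered and the
 * driver falls back to legacy (LSI) interrupts.
 */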

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	pci_disable_msi(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
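
/*
 * Probe is deliberately split in two: ipr_probe_ioa() above only claims
 * PCI resources, maps registers, and allocates driver memory, while
 * ipr_probe_ioa_part2() (called from ipr_probe()) performs the initial
 * adapter reset and waits for the IOA to become operational before the
 * SCSI host is registered and scanned.
 */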

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work_sync(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}

/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
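
/*
 * Each entry matches on vendor, device, subsystem vendor, and subsystem
 * device ID; the final field is driver_data, which ipr_probe_ioa() reads
 * for per-board quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET. The all-zero sentinel terminates the table.
 */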

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
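
/*
 * ipr_halt() runs from the reboot notifier chain on restart, halt, or
 * power-off. Issuing IPR_IOA_SHUTDOWN with PREPARE_FOR_NORMAL gives each
 * adapter a chance to flush its write cache before the platform goes
 * down; adapters that are not currently accepting commands are simply
 * skipped.
 */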

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);