/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};
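
/*
 * Illustrative sketch (an assumption about the probe path, which is not
 * part of this section; the field names vendor/device/cfg are inferred
 * from the initializer order above): the driver presumably selects a
 * register layout by matching the bound PCI IDs against ipr_chip[]:
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			return ipr_chip[i].cfg;
 */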
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
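
/*
 * Usage note (illustrative invocation, not from this file): the options
 * above are ordinary module parameters, so a load line such as
 *
 *	modprobe ipr max_speed=2 fastfail=1 log_level=4
 *
 * caps the bus at U320, shortens timeouts/retries, and raises driver
 * verbosity.  fastfail and debug are registered S_IRUGO | S_IWUSR, so
 * they can also be changed at runtime via /sys/module/ipr/parameters/.
 */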
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
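
/*
 * Usage example from later in this file: the submit and error paths record
 * one trace entry per significant command event, e.g.
 *
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
 *
 * Entries land in the fixed-size ioa_cfg->trace[] ring; trace_index simply
 * increments, so old entries are overwritten once the ring wraps (the
 * wrap is assumed to come from the index width matching the ring size).
 */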
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
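
/*
 * Illustrative sketch (the argument value is an assumption, not a call
 * taken from this section): reset paths typically mask everything and
 * then clear whatever may be pending, e.g.
 *
 *	ipr_mask_and_clear_interrupts(ioa_cfg, ~0u);
 *
 * The final readl() of sense_interrupt_reg above flushes the posted MMIO
 * writes, so the mask is guaranteed to be in effect before the caller
 * continues.
 */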
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
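
/*
 * Illustrative pairing (an assumption about the call sequence; the reset
 * helper name below is hypothetical): the two functions above bracket an
 * adapter reset, which can clobber the PCI-X command register.  DPERR_E
 * and ERO are OR'd into the saved value so data parity error recovery
 * and relaxed ordering come back enabled on restore:
 *
 *	if (ipr_save_pcix_cmd_reg(ioa_cfg))
 *		goto fail;			(hypothetical)
 *	do_adapter_reset(ioa_cfg);		(hypothetical)
 *	ipr_set_pcix_cmd_reg(ioa_cfg);
 */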
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then OR in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
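
/*
 * Worked example of the sis64 size hint above, assuming the 16-byte
 * ioadl64 descriptor implied by its flags/data_len/address fields (see
 * ipr_init_ioadl() below).  The IOARCB is aligned, so its low address
 * bits are free to carry the size hint:
 *
 *	dma_use_sg = 8:  8 * 16 = 128, not > 128  ->  addr | 0x1
 *	                 (default 256 byte IOARCB)
 *	dma_use_sg = 9:  9 * 16 = 144 > 128       ->  addr | 0x1 | 0x4
 *	                 (adapter fetches a 512 byte IOARCB)
 */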
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}
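
/*
 * Illustrative sketch (mirrors ipr_send_blocking_cmd() below; the timeout
 * handler name is whatever the caller provides): queue the command, name
 * a completion callback, and arm a timeout in jiffies.  The mb() above
 * orders the IOARCB and timer setup ahead of the doorbell write in
 * ipr_send_command().
 *
 *	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, my_timeout_func, timeout);
 */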
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
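
/*
 * Example from this file: ipr_send_hcam() below maps an entire hostrcb
 * with exactly this kind of single read descriptor:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */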
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
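
/*
 * Illustrative calling pattern (a sketch; ipr_generic_timeout is a
 * placeholder name, real callers pass their own handlers): the caller
 * must already hold host_lock with interrupts disabled, since the helper
 * above drops and retakes exactly that lock around the sleep so the
 * interrupt handler can complete the command:
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	... build the command ...
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_generic_timeout, timeout);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 */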
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
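
/*
 * Worked example of the CDB length encoding above: the allocation length
 * is stored big-endian in cdb[7..8].  For a hypothetical
 * sizeof(hostrcb->hcam) of 0x1234 bytes:
 *
 *	cdb[7] = (0x1234 >> 8) & 0xff  ->  0x12
 *	cdb[8] =  0x1234 & 0xff        ->  0x34
 *
 * (0x1234 is just a stand-in value for the arithmetic.)
 */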
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
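
/*
 * Worked example: a res_path of { 0x00, 0x01, 0x02, 0xff, ... } formats
 * as "00-01-02" -- two hex digits per element, '-' separated, stopping
 * at the 0xff terminator or once the buffer would be exceeded.
 */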
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
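
/*
 * Worked example: called as strip_and_pad_whitespace(7, buf) on an
 * 8-byte vendor field holding "IBM     ", the loop backs up over the
 * trailing blanks, leaving "IBM \0" and returning 4 -- the offset at
 * which ipr_log_vpd_compact() below appends the next field.
 */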
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			ipr_format_res_path(dev_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
1748 * ipr_log_hex_data - Log additional hex IOA error data.
1749 * @ioa_cfg: ioa config struct
1750 * @data: IOA error data
1751 * @len: data length
1753 * Return value:
1754 * none
1756 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1758 int i;
1760 if (len == 0)
1761 return;
1763 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1764 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766 for (i = 0; i < len / 4; i += 4) {
1767 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1768 be32_to_cpu(data[i]),
1769 be32_to_cpu(data[i+1]),
1770 be32_to_cpu(data[i+2]),
1771 be32_to_cpu(data[i+3]));
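/*
 * Illustrative sketch, not driver code: the four-words-per-line layout that
 * ipr_log_hex_data() produces, reproduced as a standalone user-space routine
 * so the byte-offset label can be seen in isolation. The buffer contents are
 * whatever the caller passes; only the loop shape mirrors the function above.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void hex_dump(const uint32_t *data, int len)
{
	int i;

	/* len is in bytes; print four 32-bit words per line, labeled
	 * with the byte offset of the first word on that line. */
	for (i = 0; i < len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}
#endif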
1776 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1777 * @ioa_cfg: ioa config struct
1778 * @hostrcb: hostrcb struct
1780 * Return value:
1781 * none
1783 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1784 struct ipr_hostrcb *hostrcb)
1786 struct ipr_hostrcb_type_17_error *error;
1788 if (ioa_cfg->sis64)
1789 error = &hostrcb->hcam.u.error64.u.type_17_error;
1790 else
1791 error = &hostrcb->hcam.u.error.u.type_17_error;
1793 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1794 strim(error->failure_reason);
1796 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1797 be32_to_cpu(hostrcb->hcam.u.error.prc));
1798 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1799 ipr_log_hex_data(ioa_cfg, error->data,
1800 be32_to_cpu(hostrcb->hcam.length) -
1801 (offsetof(struct ipr_hostrcb_error, u) +
1802 offsetof(struct ipr_hostrcb_type_17_error, data)));
1806 * ipr_log_dual_ioa_error - Log a dual adapter error.
1807 * @ioa_cfg: ioa config struct
1808 * @hostrcb: hostrcb struct
1810 * Return value:
1811 * none
1813 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1814 struct ipr_hostrcb *hostrcb)
1816 struct ipr_hostrcb_type_07_error *error;
1818 error = &hostrcb->hcam.u.error.u.type_07_error;
1819 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1820 strim(error->failure_reason);
1822 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1823 be32_to_cpu(hostrcb->hcam.u.error.prc));
1824 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1825 ipr_log_hex_data(ioa_cfg, error->data,
1826 be32_to_cpu(hostrcb->hcam.length) -
1827 (offsetof(struct ipr_hostrcb_error, u) +
1828 offsetof(struct ipr_hostrcb_type_07_error, data)));
1831 static const struct {
1832 u8 active;
1833 char *desc;
1834 } path_active_desc[] = {
1835 { IPR_PATH_NO_INFO, "Path" },
1836 { IPR_PATH_ACTIVE, "Active path" },
1837 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1840 static const struct {
1841 u8 state;
1842 char *desc;
1843 } path_state_desc[] = {
1844 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1845 { IPR_PATH_HEALTHY, "is healthy" },
1846 { IPR_PATH_DEGRADED, "is degraded" },
1847 { IPR_PATH_FAILED, "is failed" }
1851 * ipr_log_fabric_path - Log a fabric path error
1852 * @hostrcb: hostrcb struct
1853 * @fabric: fabric descriptor
1855 * Return value:
1856 * none
1858 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1859 struct ipr_hostrcb_fabric_desc *fabric)
1861 int i, j;
1862 u8 path_state = fabric->path_state;
1863 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1864 u8 state = path_state & IPR_PATH_STATE_MASK;
1866 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1867 if (path_active_desc[i].active != active)
1868 continue;
1870 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1871 if (path_state_desc[j].state != state)
1872 continue;
1874 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1875 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1876 path_active_desc[i].desc, path_state_desc[j].desc,
1877 fabric->ioa_port);
1878 } else if (fabric->cascaded_expander == 0xff) {
1879 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1880 path_active_desc[i].desc, path_state_desc[j].desc,
1881 fabric->ioa_port, fabric->phy);
1882 } else if (fabric->phy == 0xff) {
1883 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1884 path_active_desc[i].desc, path_state_desc[j].desc,
1885 fabric->ioa_port, fabric->cascaded_expander);
1886 } else {
1887 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1888 path_active_desc[i].desc, path_state_desc[j].desc,
1889 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891 return;
1895 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1896 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1900 * ipr_log64_fabric_path - Log a fabric path error
1901 * @hostrcb: hostrcb struct
1902 * @fabric: fabric descriptor
1904 * Return value:
1905 * none
1907 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1908 struct ipr_hostrcb64_fabric_desc *fabric)
1910 int i, j;
1911 u8 path_state = fabric->path_state;
1912 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1913 u8 state = path_state & IPR_PATH_STATE_MASK;
1914 char buffer[IPR_MAX_RES_PATH_LENGTH];
1916 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1917 if (path_active_desc[i].active != active)
1918 continue;
1920 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1921 if (path_state_desc[j].state != state)
1922 continue;
1924 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1925 path_active_desc[i].desc, path_state_desc[j].desc,
1926 ipr_format_res_path(fabric->res_path, buffer,
1927 sizeof(buffer)));
1928 return;
1932 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1933 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1936 static const struct {
1937 u8 type;
1938 char *desc;
1939 } path_type_desc[] = {
1940 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1941 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1942 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1943 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1946 static const struct {
1947 u8 status;
1948 char *desc;
1949 } path_status_desc[] = {
1950 { IPR_PATH_CFG_NO_PROB, "Functional" },
1951 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1952 { IPR_PATH_CFG_FAILED, "Failed" },
1953 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1954 { IPR_PATH_NOT_DETECTED, "Missing" },
1955 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1958 static const char *link_rate[] = {
1959 "unknown",
1960 "disabled",
1961 "phy reset problem",
1962 "spinup hold",
1963 "port selector",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "1.5Gbps",
1968 "3.0Gbps",
1969 "unknown",
1970 "unknown",
1971 "unknown",
1972 "unknown",
1973 "unknown",
1974 "unknown"
1978 * ipr_log_path_elem - Log a fabric path element.
1979 * @hostrcb: hostrcb struct
1980 * @cfg: fabric path element struct
1982 * Return value:
1983 * none
1985 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1986 struct ipr_hostrcb_config_element *cfg)
1988 int i, j;
1989 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1990 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992 if (type == IPR_PATH_CFG_NOT_EXIST)
1993 return;
1995 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1996 if (path_type_desc[i].type != type)
1997 continue;
1999 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2000 if (path_status_desc[j].status != status)
2001 continue;
2003 if (type == IPR_PATH_CFG_IOA_PORT) {
2004 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2005 path_status_desc[j].desc, path_type_desc[i].desc,
2006 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2007 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2008 } else {
2009 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2010 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2011 path_status_desc[j].desc, path_type_desc[i].desc,
2012 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2013 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2014 } else if (cfg->cascaded_expander == 0xff) {
2015 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2016 "WWN=%08X%08X\n", path_status_desc[j].desc,
2017 path_type_desc[i].desc, cfg->phy,
2018 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2019 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2020 } else if (cfg->phy == 0xff) {
2021 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2022 "WWN=%08X%08X\n", path_status_desc[j].desc,
2023 path_type_desc[i].desc, cfg->cascaded_expander,
2024 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2025 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2026 } else {
2027 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2028 "WWN=%08X%08X\n", path_status_desc[j].desc,
2029 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2030 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2031 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2034 return;
2038 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2039 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2040 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2041 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2045 * ipr_log64_path_elem - Log a fabric path element.
2046 * @hostrcb: hostrcb struct
2047 * @cfg: fabric path element struct
2049 * Return value:
2050 * none
2052 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2053 struct ipr_hostrcb64_config_element *cfg)
2055 int i, j;
2056 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2057 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2058 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2059 char buffer[IPR_MAX_RES_PATH_LENGTH];
2061 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2062 return;
2064 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2065 if (path_type_desc[i].type != type)
2066 continue;
2068 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2069 if (path_status_desc[j].status != status)
2070 continue;
2072 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2073 path_status_desc[j].desc, path_type_desc[i].desc,
2074 ipr_format_res_path(cfg->res_path, buffer,
2075 sizeof(buffer)),
2076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2078 return;
2081 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2082 "WWN=%08X%08X\n", cfg->type_status,
2083 ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2084 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2085 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2089 * ipr_log_fabric_error - Log a fabric error.
2090 * @ioa_cfg: ioa config struct
2091 * @hostrcb: hostrcb struct
2093 * Return value:
2094 * none
2096 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2097 struct ipr_hostrcb *hostrcb)
2099 struct ipr_hostrcb_type_20_error *error;
2100 struct ipr_hostrcb_fabric_desc *fabric;
2101 struct ipr_hostrcb_config_element *cfg;
2102 int i, add_len;
2104 error = &hostrcb->hcam.u.error.u.type_20_error;
2105 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2106 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108 add_len = be32_to_cpu(hostrcb->hcam.length) -
2109 (offsetof(struct ipr_hostrcb_error, u) +
2110 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2113 ipr_log_fabric_path(hostrcb, fabric);
2114 for_each_fabric_cfg(fabric, cfg)
2115 ipr_log_path_elem(hostrcb, cfg);
2117 add_len -= be16_to_cpu(fabric->length);
2118 fabric = (struct ipr_hostrcb_fabric_desc *)
2119 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2122 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
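/*
 * Generic sketch of the variable-length descriptor walk used above: each
 * fabric descriptor carries its own big-endian length, so the cursor must
 * advance by that length rather than by sizeof(*fabric), and whatever bytes
 * remain after the walk are dumped as raw hex. Names below are generic, and
 * the two helpers are hypothetical stand-ins for the calls above.
 */
#if 0
	while (entries-- && remaining > 0) {
		handle_descriptor(desc);		/* hypothetical */
		remaining -= be16_to_cpu(desc->length);
		desc = (void *)((unsigned long)desc +
				be16_to_cpu(desc->length));
	}
	dump_remainder(desc, remaining);		/* hypothetical */
#endif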
2126 * ipr_log_sis64_array_error - Log a sis64 array error.
2127 * @ioa_cfg: ioa config struct
2128 * @hostrcb: hostrcb struct
2130 * Return value:
2131 * none
2133 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2134 struct ipr_hostrcb *hostrcb)
2136 int i, num_entries;
2137 struct ipr_hostrcb_type_24_error *error;
2138 struct ipr_hostrcb64_array_data_entry *array_entry;
2139 char buffer[IPR_MAX_RES_PATH_LENGTH];
2140 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142 error = &hostrcb->hcam.u.error64.u.type_24_error;
2144 ipr_err_separator;
2146 ipr_err("RAID %s Array Configuration: %s\n",
2147 error->protection_level,
2148 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2150 ipr_err_separator;
2152 array_entry = error->array_member;
2153 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2154 ARRAY_SIZE(error->array_member));
2156 for (i = 0; i < num_entries; i++, array_entry++) {
2158 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2159 continue;
2161 if (error->exposed_mode_adn == i)
2162 ipr_err("Exposed Array Member %d:\n", i);
2163 else
2164 ipr_err("Array Member %d:\n", i);
2166 ipr_err("Array Member %d:\n", i);
2167 ipr_log_ext_vpd(&array_entry->vpd);
2168 ipr_err("Current Location: %s",
2169 ipr_format_res_path(array_entry->res_path, buffer,
2170 sizeof(buffer)));
2171 ipr_err("Expected Location: %s",
2172 ipr_format_res_path(array_entry->expected_res_path,
2173 buffer, sizeof(buffer)));
2175 ipr_err_separator;
2180 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2181 * @ioa_cfg: ioa config struct
2182 * @hostrcb: hostrcb struct
2184 * Return value:
2185 * none
2187 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2188 struct ipr_hostrcb *hostrcb)
2190 struct ipr_hostrcb_type_30_error *error;
2191 struct ipr_hostrcb64_fabric_desc *fabric;
2192 struct ipr_hostrcb64_config_element *cfg;
2193 int i, add_len;
2195 error = &hostrcb->hcam.u.error64.u.type_30_error;
2197 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2198 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200 add_len = be32_to_cpu(hostrcb->hcam.length) -
2201 (offsetof(struct ipr_hostrcb64_error, u) +
2202 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2205 ipr_log64_fabric_path(hostrcb, fabric);
2206 for_each_fabric_cfg(fabric, cfg)
2207 ipr_log64_path_elem(hostrcb, cfg);
2209 add_len -= be16_to_cpu(fabric->length);
2210 fabric = (struct ipr_hostrcb64_fabric_desc *)
2211 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2214 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2218 * ipr_log_generic_error - Log an adapter error.
2219 * @ioa_cfg: ioa config struct
2220 * @hostrcb: hostrcb struct
2222 * Return value:
2223 * none
2225 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2226 struct ipr_hostrcb *hostrcb)
2228 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2229 be32_to_cpu(hostrcb->hcam.length));
2233 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2234 * @ioasc: IOASC
2236 * This function will return the index into the ipr_error_table
2237 * for the specified IOASC. If the IOASC is not in the table,
2238 * 0 will be returned, which points to the entry used for unknown errors.
2240 * Return value:
2241 * index into the ipr_error_table
2243 static u32 ipr_get_error(u32 ioasc)
2245 int i;
2247 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2248 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2249 return i;
2251 return 0;
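/*
 * Illustrative sketch, not driver code: the sentinel-at-index-zero pattern
 * ipr_get_error() depends on. Entry 0 of ipr_error_table describes the
 * unknown error, so a failed search can simply return 0 and callers never
 * need to handle a miss. The two-entry table below is hypothetical.
 */
#if 0
static const struct { u32 ioasc; const char *error; } example_table[] = {
	{ 0x00000000, "Unknown error" },	/* sentinel for misses */
	{ 0x01080000, "Some specific error" },
};
#endif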
2255 * ipr_handle_log_data - Log an adapter error.
2256 * @ioa_cfg: ioa config struct
2257 * @hostrcb: hostrcb struct
2259 * This function logs an adapter error to the system.
2261 * Return value:
2262 * none
2264 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2265 struct ipr_hostrcb *hostrcb)
2267 u32 ioasc;
2268 int error_index;
2270 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2271 return;
2273 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2274 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276 if (ioa_cfg->sis64)
2277 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2278 else
2279 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2281 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2282 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2283 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2284 scsi_report_bus_reset(ioa_cfg->host,
2285 hostrcb->hcam.u.error.fd_res_addr.bus);
2288 error_index = ipr_get_error(ioasc);
2290 if (!ipr_error_table[error_index].log_hcam)
2291 return;
2293 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2295 /* Set indication we have logged an error */
2296 ioa_cfg->errors_logged++;
2298 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2299 return;
2300 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2301 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2303 switch (hostrcb->hcam.overlay_id) {
2304 case IPR_HOST_RCB_OVERLAY_ID_2:
2305 ipr_log_cache_error(ioa_cfg, hostrcb);
2306 break;
2307 case IPR_HOST_RCB_OVERLAY_ID_3:
2308 ipr_log_config_error(ioa_cfg, hostrcb);
2309 break;
2310 case IPR_HOST_RCB_OVERLAY_ID_4:
2311 case IPR_HOST_RCB_OVERLAY_ID_6:
2312 ipr_log_array_error(ioa_cfg, hostrcb);
2313 break;
2314 case IPR_HOST_RCB_OVERLAY_ID_7:
2315 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2316 break;
2317 case IPR_HOST_RCB_OVERLAY_ID_12:
2318 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2319 break;
2320 case IPR_HOST_RCB_OVERLAY_ID_13:
2321 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2322 break;
2323 case IPR_HOST_RCB_OVERLAY_ID_14:
2324 case IPR_HOST_RCB_OVERLAY_ID_16:
2325 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2326 break;
2327 case IPR_HOST_RCB_OVERLAY_ID_17:
2328 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2329 break;
2330 case IPR_HOST_RCB_OVERLAY_ID_20:
2331 ipr_log_fabric_error(ioa_cfg, hostrcb);
2332 break;
2333 case IPR_HOST_RCB_OVERLAY_ID_23:
2334 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2335 break;
2336 case IPR_HOST_RCB_OVERLAY_ID_24:
2337 case IPR_HOST_RCB_OVERLAY_ID_26:
2338 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2339 break;
2340 case IPR_HOST_RCB_OVERLAY_ID_30:
2341 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2342 break;
2343 case IPR_HOST_RCB_OVERLAY_ID_1:
2344 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2345 default:
2346 ipr_log_generic_error(ioa_cfg, hostrcb);
2347 break;
2352 * ipr_process_error - Op done function for an adapter error log.
2353 * @ipr_cmd: ipr command struct
2355 * This function is the op done function for an error log host
2356 * controlled async (HCAM) from the adapter. It will log the error and
2357 * send the HCAM back to the adapter.
2359 * Return value:
2360 * none
2362 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2365 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2366 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2367 u32 fd_ioasc;
2369 if (ioa_cfg->sis64)
2370 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2371 else
2372 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2374 list_del(&hostrcb->queue);
2375 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377 if (!ioasc) {
2378 ipr_handle_log_data(ioa_cfg, hostrcb);
2379 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2380 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2381 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2382 dev_err(&ioa_cfg->pdev->dev,
2383 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2386 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2390 * ipr_timeout - An internally generated op has timed out.
2391 * @ipr_cmd: ipr command struct
2393 * This function blocks host requests and initiates an
2394 * adapter reset.
2396 * Return value:
2397 * none
2399 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401 unsigned long lock_flags = 0;
2402 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404 ENTER;
2405 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407 ioa_cfg->errors_logged++;
2408 dev_err(&ioa_cfg->pdev->dev,
2409 "Adapter being reset due to command timeout.\n");
2411 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2412 ioa_cfg->sdt_state = GET_DUMP;
2414 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418 LEAVE;
2422 * ipr_oper_timeout - Adapter timed out transitioning to operational
2423 * @ipr_cmd: ipr command struct
2425 * This function blocks host requests and initiates an
2426 * adapter reset.
2428 * Return value:
2429 * none
2431 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433 unsigned long lock_flags = 0;
2434 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436 ENTER;
2437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439 ioa_cfg->errors_logged++;
2440 dev_err(&ioa_cfg->pdev->dev,
2441 "Adapter timed out transitioning to operational.\n");
2443 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2444 ioa_cfg->sdt_state = GET_DUMP;
2446 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2447 if (ipr_fastfail)
2448 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2449 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2453 LEAVE;
2457 * ipr_reset_reload - Reset/Reload the IOA
2458 * @ioa_cfg: ioa config struct
2459 * @shutdown_type: shutdown type
2461 * This function resets the adapter and re-initializes it.
2462 * This function assumes that all new host commands have been stopped.
2463 * Return value:
2464 * SUCCESS / FAILED
2466 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2467 enum ipr_shutdown_type shutdown_type)
2469 if (!ioa_cfg->in_reset_reload)
2470 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472 spin_unlock_irq(ioa_cfg->host->host_lock);
2473 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2474 spin_lock_irq(ioa_cfg->host->host_lock);
2476 /* If a host reset was issued while we were already resetting the
2477 adapter for some reason and that reset failed, the adapter is now offline. */
2478 if (ioa_cfg->ioa_is_dead) {
2479 ipr_trace;
2480 return FAILED;
2483 return SUCCESS;
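/*
 * Hypothetical usage sketch: roughly how a SCSI error-handler callback would
 * drive ipr_reset_reload(). The function must be entered with the host lock
 * held, drops it internally to sleep on reset_wait_q, and returns
 * SUCCESS/FAILED as the midlayer expects. The callback name and shutdown
 * type below are illustrative only.
 */
#if 0
static int example_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg =
		(struct ipr_ioa_cfg *)cmd->device->host->hostdata;
	int rc;

	spin_lock_irq(ioa_cfg->host->host_lock);
	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	spin_unlock_irq(ioa_cfg->host->host_lock);

	return rc;
}
#endif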
2487 * ipr_find_ses_entry - Find matching SES in SES table
2488 * @res: resource entry struct of SES
2490 * Return value:
2491 * pointer to SES table entry / NULL on failure
2493 static const struct ipr_ses_table_entry *
2494 ipr_find_ses_entry(struct ipr_resource_entry *res)
2496 int i, j, matches;
2497 struct ipr_std_inq_vpids *vpids;
2498 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2501 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2502 if (ste->compare_product_id_byte[j] == 'X') {
2503 vpids = &res->std_inq_data.vpids;
2504 if (vpids->product_id[j] == ste->product_id[j])
2505 matches++;
2506 else
2507 break;
2508 } else
2509 matches++;
2512 if (matches == IPR_PROD_ID_LEN)
2513 return ste;
2516 return NULL;
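/*
 * Illustrative sketch, not driver code: the mask convention that
 * ipr_find_ses_entry() follows. An 'X' in compare_product_id_byte[] marks a
 * byte that must match the device's product ID; any other character is a
 * don't-care. A hypothetical table entry matching only the first four bytes
 * of the product ID would therefore look like this (all values assumed):
 */
#if 0
static const struct ipr_ses_table_entry example_ste = {
	.product_id		 = "HSBP            ",
	.compare_product_id_byte = "XXXX            ",
	.max_bus_speed_limit	 = 160,	/* MHz */
};
#endif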
2520 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2521 * @ioa_cfg: ioa config struct
2522 * @bus: SCSI bus
2523 * @bus_width: bus width
2525 * Return value:
2526 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2527 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2528 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2529 * max 160MHz = max 320MB/sec).
2531 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533 struct ipr_resource_entry *res;
2534 const struct ipr_ses_table_entry *ste;
2535 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537 /* Loop through each config table entry in the config table buffer */
2538 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2539 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2540 continue;
2542 if (bus != res->bus)
2543 continue;
2545 if (!(ste = ipr_find_ses_entry(res)))
2546 continue;
2548 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2551 return max_xfer_rate;
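/*
 * Worked example of the rate math above, with assumed values: a SES entry
 * limiting the bus to 160 MHz (max_bus_speed_limit = 160) on a 16-bit wide
 * bus yields (160 * 10) / (16 / 8) = 800 in the 100KHz units this function
 * returns; a narrow (8-bit) bus with the same limit would return 1600.
 */
#if 0
	max_xfer_rate = (160 * 10) / (16 / 8);	/* 800, vs 1600 when narrow */
#endif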
2555 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2556 * @ioa_cfg: ioa config struct
2557 * @max_delay: max delay in micro-seconds to wait
2559 * Busy-waits for an IODEBUG ACK from the IOA.
2561 * Return value:
2562 * 0 on success / other on failure
2564 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566 volatile u32 pcii_reg;
2567 int delay = 1;
2569 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2570 while (delay < max_delay) {
2571 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2574 return 0;
2576 /* udelay cannot be used if delay is more than a few milliseconds */
2577 if ((delay / 1000) > MAX_UDELAY_MS)
2578 mdelay(delay / 1000);
2579 else
2580 udelay(delay);
2582 delay += delay;
2584 return -EIO;
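/*
 * Generic sketch of the doubling poll interval used above: `delay += delay`
 * is an exponential backoff, so waiting up to max_delay microseconds costs
 * only about log2(max_delay) register reads instead of one read per fixed
 * step. The condition_met() predicate below is hypothetical, and the
 * mdelay() fallback for long waits is omitted for brevity.
 */
#if 0
static int poll_with_backoff(int max_delay)
{
	int delay = 1;

	while (delay < max_delay) {
		if (condition_met())
			return 0;
		udelay(delay);
		delay += delay;		/* 1, 2, 4, 8, ... microseconds */
	}
	return -EIO;
}
#endif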
2588 * ipr_get_sis64_dump_data_section - Dump IOA memory
2589 * @ioa_cfg: ioa config struct
2590 * @start_addr: adapter address to dump
2591 * @dest: destination kernel buffer
2592 * @length_in_words: length to dump in 4 byte words
2594 * Return value:
2595 * 0 on success
2597 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2598 u32 start_addr,
2599 __be32 *dest, u32 length_in_words)
2601 int i;
2603 for (i = 0; i < length_in_words; i++) {
2604 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2605 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2606 dest++;
2609 return 0;
2613 * ipr_get_ldump_data_section - Dump IOA memory
2614 * @ioa_cfg: ioa config struct
2615 * @start_addr: adapter address to dump
2616 * @dest: destination kernel buffer
2617 * @length_in_words: length to dump in 4 byte words
2619 * Return value:
2620 * 0 on success / -EIO on failure
2622 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2623 u32 start_addr,
2624 __be32 *dest, u32 length_in_words)
2626 volatile u32 temp_pcii_reg;
2627 int i, delay = 0;
2629 if (ioa_cfg->sis64)
2630 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2631 dest, length_in_words);
2633 /* Write IOA interrupt reg starting LDUMP state */
2634 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2635 ioa_cfg->regs.set_uproc_interrupt_reg32);
2637 /* Wait for IO debug acknowledge */
2638 if (ipr_wait_iodbg_ack(ioa_cfg,
2639 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2640 dev_err(&ioa_cfg->pdev->dev,
2641 "IOA dump long data transfer timeout\n");
2642 return -EIO;
2645 /* Signal LDUMP interlocked - clear IO debug ack */
2646 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2647 ioa_cfg->regs.clr_interrupt_reg);
2649 /* Write Mailbox with starting address */
2650 writel(start_addr, ioa_cfg->ioa_mailbox);
2652 /* Signal address valid - clear IOA Reset alert */
2653 writel(IPR_UPROCI_RESET_ALERT,
2654 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2656 for (i = 0; i < length_in_words; i++) {
2657 /* Wait for IO debug acknowledge */
2658 if (ipr_wait_iodbg_ack(ioa_cfg,
2659 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2660 dev_err(&ioa_cfg->pdev->dev,
2661 "IOA dump short data transfer timeout\n");
2662 return -EIO;
2665 /* Read data from mailbox and increment destination pointer */
2666 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2667 dest++;
2669 /* For all but the last word of data, signal data received */
2670 if (i < (length_in_words - 1)) {
2671 /* Signal dump data received - Clear IO debug Ack */
2672 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2673 ioa_cfg->regs.clr_interrupt_reg);
2677 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2678 writel(IPR_UPROCI_RESET_ALERT,
2679 ioa_cfg->regs.set_uproc_interrupt_reg32);
2681 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2682 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2684 /* Signal dump data received - Clear IO debug Ack */
2685 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2686 ioa_cfg->regs.clr_interrupt_reg);
2688 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2689 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2690 temp_pcii_reg =
2691 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2693 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2694 return 0;
2696 udelay(10);
2697 delay += 10;
2700 return 0;
2703 #ifdef CONFIG_SCSI_IPR_DUMP
2705 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2706 * @ioa_cfg: ioa config struct
2707 * @pci_address: adapter address
2708 * @length: length of data to copy
2710 * Copy data from PCI adapter to kernel buffer.
2711 * Note: length MUST be a 4 byte multiple
2712 * Return value:
2713 * 0 on success / other on failure
2715 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2716 unsigned long pci_address, u32 length)
2718 int bytes_copied = 0;
2719 int cur_len, rc, rem_len, rem_page_len;
2720 __be32 *page;
2721 unsigned long lock_flags = 0;
2722 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724 while (bytes_copied < length &&
2725 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2726 if (ioa_dump->page_offset >= PAGE_SIZE ||
2727 ioa_dump->page_offset == 0) {
2728 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2730 if (!page) {
2731 ipr_trace;
2732 return bytes_copied;
2735 ioa_dump->page_offset = 0;
2736 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2737 ioa_dump->next_page_index++;
2738 } else
2739 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2741 rem_len = length - bytes_copied;
2742 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2743 cur_len = min(rem_len, rem_page_len);
2745 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2746 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2747 rc = -EIO;
2748 } else {
2749 rc = ipr_get_ldump_data_section(ioa_cfg,
2750 pci_address + bytes_copied,
2751 &page[ioa_dump->page_offset / 4],
2752 (cur_len / sizeof(u32)));
2754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2756 if (!rc) {
2757 ioa_dump->page_offset += cur_len;
2758 bytes_copied += cur_len;
2759 } else {
2760 ipr_trace;
2761 break;
2763 schedule();
2766 return bytes_copied;
2770 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2771 * @hdr: dump entry header struct
2773 * Return value:
2774 * nothing
2776 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2778 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2779 hdr->num_elems = 1;
2780 hdr->offset = sizeof(*hdr);
2781 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2785 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2786 * @ioa_cfg: ioa config struct
2787 * @driver_dump: driver dump struct
2789 * Return value:
2790 * nothing
2792 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2793 struct ipr_driver_dump *driver_dump)
2795 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2797 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2798 driver_dump->ioa_type_entry.hdr.len =
2799 sizeof(struct ipr_dump_ioa_type_entry) -
2800 sizeof(struct ipr_dump_entry_header);
2801 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2802 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2803 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2804 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2805 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2806 ucode_vpd->minor_release[1];
2807 driver_dump->hdr.num_entries++;
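/*
 * Illustrative sketch, not driver code: unpacking the 32-bit fw_version
 * packed above. The layout follows directly from the packing expression:
 * major release in the top byte, card type next, then the two minor
 * release bytes.
 */
#if 0
static void unpack_fw_version(u32 fw_version)
{
	u8 major  = fw_version >> 24;
	u8 card   = (fw_version >> 16) & 0xff;
	u8 minor0 = (fw_version >> 8) & 0xff;
	u8 minor1 = fw_version & 0xff;
}
#endif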
2811 * ipr_dump_version_data - Fill in the driver version in the dump.
2812 * @ioa_cfg: ioa config struct
2813 * @driver_dump: driver dump struct
2815 * Return value:
2816 * nothing
2818 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2819 struct ipr_driver_dump *driver_dump)
2821 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2822 driver_dump->version_entry.hdr.len =
2823 sizeof(struct ipr_dump_version_entry) -
2824 sizeof(struct ipr_dump_entry_header);
2825 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2826 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2827 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2828 driver_dump->hdr.num_entries++;
2832 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2833 * @ioa_cfg: ioa config struct
2834 * @driver_dump: driver dump struct
2836 * Return value:
2837 * nothing
2839 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2840 struct ipr_driver_dump *driver_dump)
2842 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2843 driver_dump->trace_entry.hdr.len =
2844 sizeof(struct ipr_dump_trace_entry) -
2845 sizeof(struct ipr_dump_entry_header);
2846 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2847 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2848 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2849 driver_dump->hdr.num_entries++;
2853 * ipr_dump_location_data - Fill in the IOA location in the dump.
2854 * @ioa_cfg: ioa config struct
2855 * @driver_dump: driver dump struct
2857 * Return value:
2858 * nothing
2860 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2861 struct ipr_driver_dump *driver_dump)
2863 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2864 driver_dump->location_entry.hdr.len =
2865 sizeof(struct ipr_dump_location_entry) -
2866 sizeof(struct ipr_dump_entry_header);
2867 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2868 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2869 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2870 driver_dump->hdr.num_entries++;
2874 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2875 * @ioa_cfg: ioa config struct
2876 * @dump: dump struct
2878 * Return value:
2879 * nothing
2881 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2883 unsigned long start_addr, sdt_word;
2884 unsigned long lock_flags = 0;
2885 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2886 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2887 u32 num_entries, start_off, end_off;
2888 u32 bytes_to_copy, bytes_copied, rc;
2889 struct ipr_sdt *sdt;
2890 int valid = 1;
2891 int i;
2893 ENTER;
2895 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2897 if (ioa_cfg->sdt_state != GET_DUMP) {
2898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2899 return;
2902 start_addr = readl(ioa_cfg->ioa_mailbox);
2904 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2905 dev_err(&ioa_cfg->pdev->dev,
2906 "Invalid dump table format: %lx\n", start_addr);
2907 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2908 return;
2911 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2913 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2915 /* Initialize the overall dump header */
2916 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2917 driver_dump->hdr.num_entries = 1;
2918 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2919 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2920 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2921 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2923 ipr_dump_version_data(ioa_cfg, driver_dump);
2924 ipr_dump_location_data(ioa_cfg, driver_dump);
2925 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2926 ipr_dump_trace_data(ioa_cfg, driver_dump);
2928 /* Update dump_header */
2929 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2931 /* IOA Dump entry */
2932 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2933 ioa_dump->hdr.len = 0;
2934 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2935 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2937 /* The first entries in the sdt are a list of dump addresses and
2938 lengths used to gather the real dump data. sdt points to the
2939 IOA-generated dump table. Dump data will be extracted based
2940 on the entries in this table. */
2941 sdt = &ioa_dump->sdt;
2943 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2944 sizeof(struct ipr_sdt) / sizeof(__be32));
2946 /* Smart Dump table is ready to use and the first entry is valid */
2947 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2948 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2949 dev_err(&ioa_cfg->pdev->dev,
2950 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2951 rc, be32_to_cpu(sdt->hdr.state));
2952 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2953 ioa_cfg->sdt_state = DUMP_OBTAINED;
2954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2955 return;
2958 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2960 if (num_entries > IPR_NUM_SDT_ENTRIES)
2961 num_entries = IPR_NUM_SDT_ENTRIES;
2963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2965 for (i = 0; i < num_entries; i++) {
2966 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2967 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2968 break;
2971 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2972 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2973 if (ioa_cfg->sis64)
2974 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2975 else {
2976 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2977 end_off = be32_to_cpu(sdt->entry[i].end_token);
2979 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2980 bytes_to_copy = end_off - start_off;
2981 else
2982 valid = 0;
2984 if (valid) {
2985 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2986 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2987 continue;
2990 /* Copy data from adapter to driver buffers */
2991 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2992 bytes_to_copy);
2994 ioa_dump->hdr.len += bytes_copied;
2996 if (bytes_copied != bytes_to_copy) {
2997 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2998 break;
3004 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3006 /* Update dump_header */
3007 driver_dump->hdr.len += ioa_dump->hdr.len;
3008 wmb();
3009 ioa_cfg->sdt_state = DUMP_OBTAINED;
3010 LEAVE;
3013 #else
3014 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3015 #endif
3018 * ipr_release_dump - Free adapter dump memory
3019 * @kref: kref struct
3021 * Return value:
3022 * nothing
3024 static void ipr_release_dump(struct kref *kref)
3026 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3027 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3028 unsigned long lock_flags = 0;
3029 int i;
3031 ENTER;
3032 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3033 ioa_cfg->dump = NULL;
3034 ioa_cfg->sdt_state = INACTIVE;
3035 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3037 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3038 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3040 kfree(dump);
3041 LEAVE;
3045 * ipr_worker_thread - Worker thread
3046 * @work: ioa config struct
3048 * Called at task level from a work thread. This function takes care
3049 * of adding and removing devices from the mid-layer as configuration
3050 * changes are detected by the adapter.
3052 * Return value:
3053 * nothing
3055 static void ipr_worker_thread(struct work_struct *work)
3057 unsigned long lock_flags;
3058 struct ipr_resource_entry *res;
3059 struct scsi_device *sdev;
3060 struct ipr_dump *dump;
3061 struct ipr_ioa_cfg *ioa_cfg =
3062 container_of(work, struct ipr_ioa_cfg, work_q);
3063 u8 bus, target, lun;
3064 int did_work;
3066 ENTER;
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069 if (ioa_cfg->sdt_state == GET_DUMP) {
3070 dump = ioa_cfg->dump;
3071 if (!dump) {
3072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3073 return;
3075 kref_get(&dump->kref);
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 ipr_get_ioa_dump(ioa_cfg, dump);
3078 kref_put(&dump->kref, ipr_release_dump);
3080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3081 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3082 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084 return;
3087 restart:
3088 do {
3089 did_work = 0;
3090 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 return;
3095 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3096 if (res->del_from_ml && res->sdev) {
3097 did_work = 1;
3098 sdev = res->sdev;
3099 if (!scsi_device_get(sdev)) {
3100 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3102 scsi_remove_device(sdev);
3103 scsi_device_put(sdev);
3104 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3106 break;
3109 } while (did_work);
3111 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3112 if (res->add_to_ml) {
3113 bus = res->bus;
3114 target = res->target;
3115 lun = res->lun;
3116 res->add_to_ml = 0;
3117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3118 scsi_add_device(ioa_cfg->host, bus, target, lun);
3119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3120 goto restart;
3124 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3125 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3126 LEAVE;
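/*
 * Generic sketch of the drop-lock-and-restart idiom the worker thread uses:
 * scsi_add_device()/scsi_remove_device() can sleep, so the host lock is
 * released around them, and since the resource list may have changed while
 * it was released, the walk restarts from the head instead of trusting a
 * stale list cursor. Condensed reproduction with generic names:
 */
#if 0
restart:
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			scsi_add_device(ioa_cfg->host, res->bus, res->target,
					res->lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
			goto restart;
		}
	}
#endif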
3129 #ifdef CONFIG_SCSI_IPR_TRACE
3131 * ipr_read_trace - Dump the adapter trace
3132 * @filp: open sysfs file
3133 * @kobj: kobject struct
3134 * @bin_attr: bin_attribute struct
3135 * @buf: buffer
3136 * @off: offset
3137 * @count: buffer size
3139 * Return value:
3140 * number of bytes printed to buffer
3142 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3143 struct bin_attribute *bin_attr,
3144 char *buf, loff_t off, size_t count)
3146 struct device *dev = container_of(kobj, struct device, kobj);
3147 struct Scsi_Host *shost = class_to_shost(dev);
3148 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3149 unsigned long lock_flags = 0;
3150 ssize_t ret;
3152 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3154 IPR_TRACE_SIZE);
3155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157 return ret;
3160 static struct bin_attribute ipr_trace_attr = {
3161 .attr = {
3162 .name = "trace",
3163 .mode = S_IRUGO,
3165 .size = 0,
3166 .read = ipr_read_trace,
3168 #endif
3171 * ipr_show_fw_version - Show the firmware version
3172 * @dev: class device struct
3173 * @buf: buffer
3175 * Return value:
3176 * number of bytes printed to buffer
3178 static ssize_t ipr_show_fw_version(struct device *dev,
3179 struct device_attribute *attr, char *buf)
3181 struct Scsi_Host *shost = class_to_shost(dev);
3182 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3183 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3184 unsigned long lock_flags = 0;
3185 int len;
3187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3188 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3189 ucode_vpd->major_release, ucode_vpd->card_type,
3190 ucode_vpd->minor_release[0],
3191 ucode_vpd->minor_release[1]);
3192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3193 return len;
3196 static struct device_attribute ipr_fw_version_attr = {
3197 .attr = {
3198 .name = "fw_version",
3199 .mode = S_IRUGO,
3201 .show = ipr_show_fw_version,
3205 * ipr_show_log_level - Show the adapter's error logging level
3206 * @dev: class device struct
3207 * @buf: buffer
3209 * Return value:
3210 * number of bytes printed to buffer
3212 static ssize_t ipr_show_log_level(struct device *dev,
3213 struct device_attribute *attr, char *buf)
3215 struct Scsi_Host *shost = class_to_shost(dev);
3216 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3217 unsigned long lock_flags = 0;
3218 int len;
3220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3221 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 return len;
3227 * ipr_store_log_level - Change the adapter's error logging level
3228 * @dev: class device struct
3229 * @buf: buffer
3231 * Return value:
3232 * number of bytes printed to buffer
3234 static ssize_t ipr_store_log_level(struct device *dev,
3235 struct device_attribute *attr,
3236 const char *buf, size_t count)
3238 struct Scsi_Host *shost = class_to_shost(dev);
3239 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3240 unsigned long lock_flags = 0;
3242 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3243 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3244 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3245 return strlen(buf);
3248 static struct device_attribute ipr_log_level_attr = {
3249 .attr = {
3250 .name = "log_level",
3251 .mode = S_IRUGO | S_IWUSR,
3253 .show = ipr_show_log_level,
3254 .store = ipr_store_log_level
3258 * ipr_store_diagnostics - IOA Diagnostics interface
3259 * @dev: device struct
3260 * @buf: buffer
3261 * @count: buffer size
3263 * This function will reset the adapter and wait a reasonable
3264 * amount of time for any errors that the adapter might log.
3266 * Return value:
3267 * count on success / other on failure
3269 static ssize_t ipr_store_diagnostics(struct device *dev,
3270 struct device_attribute *attr,
3271 const char *buf, size_t count)
3273 struct Scsi_Host *shost = class_to_shost(dev);
3274 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3275 unsigned long lock_flags = 0;
3276 int rc = count;
3278 if (!capable(CAP_SYS_ADMIN))
3279 return -EACCES;
3281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3282 while (ioa_cfg->in_reset_reload) {
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3285 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3288 ioa_cfg->errors_logged = 0;
3289 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3291 if (ioa_cfg->in_reset_reload) {
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3293 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3295 /* Wait for a second for any errors to be logged */
3296 msleep(1000);
3297 } else {
3298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3299 return -EIO;
3302 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3303 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3304 rc = -EIO;
3305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 return rc;
3310 static struct device_attribute ipr_diagnostics_attr = {
3311 .attr = {
3312 .name = "run_diagnostics",
3313 .mode = S_IWUSR,
3315 .store = ipr_store_diagnostics
3319 * ipr_show_adapter_state - Show the adapter's state
3320 * @dev: device struct
3321 * @buf: buffer
3323 * Return value:
3324 * number of bytes printed to buffer
3326 static ssize_t ipr_show_adapter_state(struct device *dev,
3327 struct device_attribute *attr, char *buf)
3329 struct Scsi_Host *shost = class_to_shost(dev);
3330 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3331 unsigned long lock_flags = 0;
3332 int len;
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335 if (ioa_cfg->ioa_is_dead)
3336 len = snprintf(buf, PAGE_SIZE, "offline\n");
3337 else
3338 len = snprintf(buf, PAGE_SIZE, "online\n");
3339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340 return len;
3344 * ipr_store_adapter_state - Change adapter state
3345 * @dev: device struct
3346 * @buf: buffer
3347 * @count: buffer size
3349 * This function will change the adapter's state.
3351 * Return value:
3352 * count on success / other on failure
3354 static ssize_t ipr_store_adapter_state(struct device *dev,
3355 struct device_attribute *attr,
3356 const char *buf, size_t count)
3358 struct Scsi_Host *shost = class_to_shost(dev);
3359 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3360 unsigned long lock_flags;
3361 int result = count;
3363 if (!capable(CAP_SYS_ADMIN))
3364 return -EACCES;
3366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3367 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3368 ioa_cfg->ioa_is_dead = 0;
3369 ioa_cfg->reset_retries = 0;
3370 ioa_cfg->in_ioa_bringdown = 0;
3371 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3376 return result;
3379 static struct device_attribute ipr_ioa_state_attr = {
3380 .attr = {
3381 .name = "online_state",
3382 .mode = S_IRUGO | S_IWUSR,
3384 .show = ipr_show_adapter_state,
3385 .store = ipr_store_adapter_state
3389 * ipr_store_reset_adapter - Reset the adapter
3390 * @dev: device struct
3391 * @buf: buffer
3392 * @count: buffer size
3394 * This function will reset the adapter.
3396 * Return value:
3397 * count on success / other on failure
3399 static ssize_t ipr_store_reset_adapter(struct device *dev,
3400 struct device_attribute *attr,
3401 const char *buf, size_t count)
3403 struct Scsi_Host *shost = class_to_shost(dev);
3404 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405 unsigned long lock_flags;
3406 int result = count;
3408 if (!capable(CAP_SYS_ADMIN))
3409 return -EACCES;
3411 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3412 if (!ioa_cfg->in_reset_reload)
3413 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3415 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3417 return result;
3420 static struct device_attribute ipr_ioa_reset_attr = {
3421 .attr = {
3422 .name = "reset_host",
3423 .mode = S_IWUSR,
3425 .store = ipr_store_reset_adapter
3429 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3430 * @buf_len: buffer length
3432 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3433 * list to use for microcode download
3435 * Return value:
3436 * pointer to sglist / NULL on failure
3438 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3440 int sg_size, order, bsize_elem, num_elem, i, j;
3441 struct ipr_sglist *sglist;
3442 struct scatterlist *scatterlist;
3443 struct page *page;
3445 /* Get the minimum size per scatter/gather element */
3446 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3448 /* Get the actual size per element */
3449 order = get_order(sg_size);
3451 /* Determine the actual number of bytes per element */
3452 bsize_elem = PAGE_SIZE * (1 << order);
3454 /* Determine the actual number of sg entries needed */
3455 if (buf_len % bsize_elem)
3456 num_elem = (buf_len / bsize_elem) + 1;
3457 else
3458 num_elem = buf_len / bsize_elem;
3460 /* Allocate a scatter/gather list for the DMA */
3461 sglist = kzalloc(sizeof(struct ipr_sglist) +
3462 (sizeof(struct scatterlist) * (num_elem - 1)),
3463 GFP_KERNEL);
3465 if (sglist == NULL) {
3466 ipr_trace;
3467 return NULL;
3470 scatterlist = sglist->scatterlist;
3471 sg_init_table(scatterlist, num_elem);
3473 sglist->order = order;
3474 sglist->num_sg = num_elem;
3476 /* Allocate a bunch of sg elements */
3477 for (i = 0; i < num_elem; i++) {
3478 page = alloc_pages(GFP_KERNEL, order);
3479 if (!page) {
3480 ipr_trace;
3482 /* Free up what we already allocated */
3483 for (j = i - 1; j >= 0; j--)
3484 __free_pages(sg_page(&scatterlist[j]), order);
3485 kfree(sglist);
3486 return NULL;
3489 sg_set_page(&scatterlist[i], page, 0, 0);
3492 return sglist;
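/*
 * Worked example of the sizing math above, assuming a 4KB PAGE_SIZE and,
 * hypothetically, IPR_MAX_SGLIST == 64, for a 1MB microcode image:
 */
#if 0
	sg_size    = 1048576 / (64 - 1);	/* 16644 byte minimum per element */
	order      = get_order(16644);		/* 3, i.e. 32KB allocations */
	bsize_elem = 4096 * (1 << 3);		/* 32768 bytes per element */
	num_elem   = 1048576 / 32768;		/* 32 elements, no remainder */
#endif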
3496 * ipr_free_ucode_buffer - Frees a microcode download buffer
3497 * @sglist: scatter/gather list pointer
3499 * Free a DMA'able ucode download buffer previously allocated with
3500 * ipr_alloc_ucode_buffer
3502 * Return value:
3503 * nothing
3505 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3507 int i;
3509 for (i = 0; i < sglist->num_sg; i++)
3510 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3512 kfree(sglist);
3516 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3517 * @sglist: scatter/gather list pointer
3518 * @buffer: buffer pointer
3519 * @len: buffer length
3521 * Copy a microcode image from a user buffer into a buffer allocated by
3522 * ipr_alloc_ucode_buffer
3524 * Return value:
3525 * 0 on success / other on failure
3527 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3528 u8 *buffer, u32 len)
3530 int bsize_elem, i, result = 0;
3531 struct scatterlist *scatterlist;
3532 void *kaddr;
3534 /* Determine the actual number of bytes per element */
3535 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3537 scatterlist = sglist->scatterlist;
3539 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3540 struct page *page = sg_page(&scatterlist[i]);
3542 kaddr = kmap(page);
3543 memcpy(kaddr, buffer, bsize_elem);
3544 kunmap(page);
3546 scatterlist[i].length = bsize_elem;
3548 if (result != 0) {
3549 ipr_trace;
3550 return result;
3554 if (len % bsize_elem) {
3555 struct page *page = sg_page(&scatterlist[i]);
3557 kaddr = kmap(page);
3558 memcpy(kaddr, buffer, len % bsize_elem);
3559 kunmap(page);
3561 scatterlist[i].length = len % bsize_elem;
3564 sglist->buffer_len = len;
3565 return result;
3569 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3570 * @ipr_cmd: ipr command struct
3571 * @sglist: scatter/gather list
3573 * Builds a microcode download IOA data list (IOADL).
3576 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3577 struct ipr_sglist *sglist)
3579 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3580 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3581 struct scatterlist *scatterlist = sglist->scatterlist;
3582 int i;
3584 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3585 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3586 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3588 ioarcb->ioadl_len =
3589 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3590 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3591 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3592 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3593 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3596 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3600 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3601 * @ipr_cmd: ipr command struct
3602 * @sglist: scatter/gather list
3604 * Builds a microcode download IOA data list (IOADL).
3607 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3608 struct ipr_sglist *sglist)
3610 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3611 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3612 struct scatterlist *scatterlist = sglist->scatterlist;
3613 int i;
3615 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3616 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3617 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3619 ioarcb->ioadl_len =
3620 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3622 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3623 ioadl[i].flags_and_data_len =
3624 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3625 ioadl[i].address =
3626 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3629 ioadl[i-1].flags_and_data_len |=
3630 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
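/*
 * Generic sketch of the end-of-chain convention both IOADL builders above
 * follow: the adapter finds the end of the descriptor list via a LAST flag
 * on the final element rather than a terminator entry, so the loop marks
 * every element for write and then ORs the LAST flag into entry i - 1 once
 * the loop has filled i entries. Names below are generic.
 */
#if 0
	for (i = 0; i < num_desc; i++)
		desc[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
	desc[i - 1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
#endif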
3634 * ipr_update_ioa_ucode - Update IOA's microcode
3635 * @ioa_cfg: ioa config struct
3636 * @sglist: scatter/gather list
3638 * Initiate an adapter reset to update the IOA's microcode
3640 * Return value:
3641 * 0 on success / -EIO on failure
3643 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3644 struct ipr_sglist *sglist)
3646 unsigned long lock_flags;
3648 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3649 while (ioa_cfg->in_reset_reload) {
3650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3651 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3652 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655 if (ioa_cfg->ucode_sglist) {
3656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657 dev_err(&ioa_cfg->pdev->dev,
3658 "Microcode download already in progress\n");
3659 return -EIO;
3662 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3663 sglist->num_sg, DMA_TO_DEVICE);
3665 if (!sglist->num_dma_sg) {
3666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667 dev_err(&ioa_cfg->pdev->dev,
3668 "Failed to map microcode download buffer!\n");
3669 return -EIO;
3672 ioa_cfg->ucode_sglist = sglist;
3673 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3675 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3677 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3678 ioa_cfg->ucode_sglist = NULL;
3679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 return 0;
3681 }
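/*
 * [Illustrative sketch -- not part of ipr.c] The while (in_reset_reload)
 * loop above is the classic "drop the lock, sleep, retake the lock,
 * re-check" idiom: the condition must be tested again after the lock is
 * reacquired because the state may have changed while we slept. The
 * same idiom in userspace pthreads:
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_done = PTHREAD_COND_INITIALIZER;
static bool in_reset_reload;

static void wait_for_reset_quiesce(void)
{
	pthread_mutex_lock(&lock);
	while (in_reset_reload)		/* re-check after every wakeup */
		pthread_cond_wait(&reset_done, &lock);
	/* lock is held here and the reset is known to be complete */
	pthread_mutex_unlock(&lock);
}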
3684 * ipr_store_update_fw - Update the firmware on the adapter
3685 * @dev: device struct
3686 * @buf: buffer
3687 * @count: buffer size
3689 * This function will update the firmware on the adapter.
3691 * Return value:
3692 * count on success / other on failure
3694 static ssize_t ipr_store_update_fw(struct device *dev,
3695 struct device_attribute *attr,
3696 const char *buf, size_t count)
3698 struct Scsi_Host *shost = class_to_shost(dev);
3699 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700 struct ipr_ucode_image_header *image_hdr;
3701 const struct firmware *fw_entry;
3702 struct ipr_sglist *sglist;
3703 char fname[100];
3704 char *src;
3705 int len, result, dnld_size;
3707 if (!capable(CAP_SYS_ADMIN))
3708 return -EACCES;
3710 len = snprintf(fname, sizeof(fname), "%s", buf);
3711 if (len > 0 && len < sizeof(fname) && fname[len - 1] == '\n')
3712 fname[len - 1] = '\0';
3713 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3714 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3715 return -EIO;
3718 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3720 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3721 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3722 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3723 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3724 release_firmware(fw_entry);
3725 return -EINVAL;
3728 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3729 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3730 sglist = ipr_alloc_ucode_buffer(dnld_size);
3732 if (!sglist) {
3733 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3734 release_firmware(fw_entry);
3735 return -ENOMEM;
3738 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3740 if (result) {
3741 dev_err(&ioa_cfg->pdev->dev,
3742 "Microcode buffer copy to DMA buffer failed\n");
3743 goto out;
3746 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3748 if (!result)
3749 result = count;
3750 out:
3751 ipr_free_ucode_buffer(sglist);
3752 release_firmware(fw_entry);
3753 return result;
3754 }
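/*
 * [Illustrative sketch -- not part of ipr.c] Before handing the image to
 * ipr_update_ioa_ucode, the store handler above checks that the header
 * fits inside the firmware file and that the image was built for this
 * card type. That validation, reduced to a standalone predicate (field
 * names hypothetical):
 */
#include <stdint.h>
#include <stddef.h>

struct fw_image_hdr {
	uint32_t header_length;		/* bytes, includes the header itself */
	uint8_t card_type;
};

/* Returns 0 if the image is plausible for an adapter of 'my_card_type'. */
static int fw_image_ok(const struct fw_image_hdr *hdr, size_t fw_size,
		       uint8_t my_card_type)
{
	if (hdr->header_length > fw_size)
		return -1;		/* header runs past end of file */
	if (my_card_type && hdr->card_type != my_card_type)
		return -1;		/* image is for a different adapter */
	return 0;
}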
3756 static struct device_attribute ipr_update_fw_attr = {
3757 .attr = {
3758 .name = "update_fw",
3759 .mode = S_IWUSR,
3760 },
3761 .store = ipr_store_update_fw
3762 };
3764 static struct device_attribute *ipr_ioa_attrs[] = {
3765 &ipr_fw_version_attr,
3766 &ipr_log_level_attr,
3767 &ipr_diagnostics_attr,
3768 &ipr_ioa_state_attr,
3769 &ipr_ioa_reset_attr,
3770 &ipr_update_fw_attr,
3771 NULL,
3772 };
3774 #ifdef CONFIG_SCSI_IPR_DUMP
3776 * ipr_read_dump - Dump the adapter
3777 * @filp: open sysfs file
3778 * @kobj: kobject struct
3779 * @bin_attr: bin_attribute struct
3780 * @buf: buffer
3781 * @off: offset
3782 * @count: buffer size
3784 * Return value:
3785 * number of bytes printed to buffer
3787 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3788 struct bin_attribute *bin_attr,
3789 char *buf, loff_t off, size_t count)
3791 struct device *cdev = container_of(kobj, struct device, kobj);
3792 struct Scsi_Host *shost = class_to_shost(cdev);
3793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3794 struct ipr_dump *dump;
3795 unsigned long lock_flags = 0;
3796 char *src;
3797 int len;
3798 size_t rc = count;
3800 if (!capable(CAP_SYS_ADMIN))
3801 return -EACCES;
3803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3804 dump = ioa_cfg->dump;
3806 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3808 return 0;
3810 kref_get(&dump->kref);
3811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3813 if (off > dump->driver_dump.hdr.len) {
3814 kref_put(&dump->kref, ipr_release_dump);
3815 return 0;
3818 if (off + count > dump->driver_dump.hdr.len) {
3819 count = dump->driver_dump.hdr.len - off;
3820 rc = count;
3823 if (count && off < sizeof(dump->driver_dump)) {
3824 if (off + count > sizeof(dump->driver_dump))
3825 len = sizeof(dump->driver_dump) - off;
3826 else
3827 len = count;
3828 src = (u8 *)&dump->driver_dump + off;
3829 memcpy(buf, src, len);
3830 buf += len;
3831 off += len;
3832 count -= len;
3835 off -= sizeof(dump->driver_dump);
3837 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3838 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3839 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3840 else
3841 len = count;
3842 src = (u8 *)&dump->ioa_dump + off;
3843 memcpy(buf, src, len);
3844 buf += len;
3845 off += len;
3846 count -= len;
3849 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3851 while (count) {
3852 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3853 len = PAGE_ALIGN(off) - off;
3854 else
3855 len = count;
3856 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3857 src += off & ~PAGE_MASK;
3858 memcpy(buf, src, len);
3859 buf += len;
3860 off += len;
3861 count -= len;
3864 kref_put(&dump->kref, ipr_release_dump);
3865 return rc;
3866 }
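/*
 * [Illustrative sketch -- not part of ipr.c] ipr_read_dump presents
 * three back-to-back regions (the driver dump header, the IOA dump
 * header, then an array of data pages) as one flat file: each region
 * copies its overlap with [off, off + count) and then 'off' is rebased
 * for the next region. The per-region step, condensed (a signed offset
 * mirrors the kernel's loff_t; the rebase can only go negative once
 * count has already reached zero):
 */
#include <string.h>
#include <stddef.h>

static void copy_region(char **buf, long *off, size_t *count,
			const char *region, size_t rlen)
{
	size_t len;

	if (*count && *off < (long)rlen) {
		if ((size_t)*off + *count > rlen)
			len = rlen - (size_t)*off;
		else
			len = *count;
		memcpy(*buf, region + *off, len);
		*buf += len;
		*off += len;
		*count -= len;
	}
	*off -= rlen;			/* rebase for the next region */
}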
3869 * ipr_alloc_dump - Prepare for adapter dump
3870 * @ioa_cfg: ioa config struct
3872 * Return value:
3873 * 0 on success / other on failure
3875 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3877 struct ipr_dump *dump;
3878 unsigned long lock_flags = 0;
3880 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3882 if (!dump) {
3883 ipr_err("Dump memory allocation failed\n");
3884 return -ENOMEM;
3887 kref_init(&dump->kref);
3888 dump->ioa_cfg = ioa_cfg;
3890 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3892 if (INACTIVE != ioa_cfg->sdt_state) {
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894 kfree(dump);
3895 return 0;
3898 ioa_cfg->dump = dump;
3899 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3900 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3901 ioa_cfg->dump_taken = 1;
3902 schedule_work(&ioa_cfg->work_q);
3904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3906 return 0;
3910 * ipr_free_dump - Free adapter dump memory
3911 * @ioa_cfg: ioa config struct
3913 * Return value:
3914 * 0 on success / other on failure
3916 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3918 struct ipr_dump *dump;
3919 unsigned long lock_flags = 0;
3921 ENTER;
3923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3924 dump = ioa_cfg->dump;
3925 if (!dump) {
3926 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3927 return 0;
3930 ioa_cfg->dump = NULL;
3931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3933 kref_put(&dump->kref, ipr_release_dump);
3935 LEAVE;
3936 return 0;
3940 * ipr_write_dump - Setup dump state of adapter
3941 * @filp: open sysfs file
3942 * @kobj: kobject struct
3943 * @bin_attr: bin_attribute struct
3944 * @buf: buffer
3945 * @off: offset
3946 * @count: buffer size
3948 * Return value:
3949 * count on success / negative error code on failure
3951 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3952 struct bin_attribute *bin_attr,
3953 char *buf, loff_t off, size_t count)
3955 struct device *cdev = container_of(kobj, struct device, kobj);
3956 struct Scsi_Host *shost = class_to_shost(cdev);
3957 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3958 int rc;
3960 if (!capable(CAP_SYS_ADMIN))
3961 return -EACCES;
3963 if (buf[0] == '1')
3964 rc = ipr_alloc_dump(ioa_cfg);
3965 else if (buf[0] == '0')
3966 rc = ipr_free_dump(ioa_cfg);
3967 else
3968 return -EINVAL;
3970 if (rc)
3971 return rc;
3972 else
3973 return count;
3976 static struct bin_attribute ipr_dump_attr = {
3977 .attr = {
3978 .name = "dump",
3979 .mode = S_IRUSR | S_IWUSR,
3980 },
3981 .size = 0,
3982 .read = ipr_read_dump,
3983 .write = ipr_write_dump
3984 };
3985 #else
3986 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3987 #endif
3990 * ipr_change_queue_depth - Change the device's queue depth
3991 * @sdev: scsi device struct
3992 * @qdepth: depth to set
3993 * @reason: calling context
3995 * Return value:
3996 * actual depth set
3998 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3999 int reason)
4001 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4002 struct ipr_resource_entry *res;
4003 unsigned long lock_flags = 0;
4005 if (reason != SCSI_QDEPTH_DEFAULT)
4006 return -EOPNOTSUPP;
4008 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4009 res = (struct ipr_resource_entry *)sdev->hostdata;
4011 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4012 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4015 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4016 return sdev->queue_depth;
4020 * ipr_change_queue_type - Change the device's queue type
4021 * @sdev: scsi device struct
4022 * @tag_type: type of tags to use
4024 * Return value:
4025 * actual queue type set
4027 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4029 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4030 struct ipr_resource_entry *res;
4031 unsigned long lock_flags = 0;
4033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4034 res = (struct ipr_resource_entry *)sdev->hostdata;
4036 if (res) {
4037 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4039 * We don't bother quiescing the device here since the
4040 * adapter firmware does it for us.
4042 scsi_set_tag_type(sdev, tag_type);
4044 if (tag_type)
4045 scsi_activate_tcq(sdev, sdev->queue_depth);
4046 else
4047 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4048 } else
4049 tag_type = 0;
4050 } else
4051 tag_type = 0;
4053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054 return tag_type;
4058 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4059 * @dev: device struct
4060 * @buf: buffer
4062 * Return value:
4063 * number of bytes printed to buffer
4065 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4067 struct scsi_device *sdev = to_scsi_device(dev);
4068 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4069 struct ipr_resource_entry *res;
4070 unsigned long lock_flags = 0;
4071 ssize_t len = -ENXIO;
4073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4074 res = (struct ipr_resource_entry *)sdev->hostdata;
4075 if (res)
4076 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4078 return len;
4081 static struct device_attribute ipr_adapter_handle_attr = {
4082 .attr = {
4083 .name = "adapter_handle",
4084 .mode = S_IRUSR,
4085 },
4086 .show = ipr_show_adapter_handle
4087 };
4090 * ipr_show_resource_path - Show the resource path or the resource address for
4091 * this device.
4092 * @dev: device struct
4093 * @buf: buffer
4095 * Return value:
4096 * number of bytes printed to buffer
4098 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4100 struct scsi_device *sdev = to_scsi_device(dev);
4101 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4102 struct ipr_resource_entry *res;
4103 unsigned long lock_flags = 0;
4104 ssize_t len = -ENXIO;
4105 char buffer[IPR_MAX_RES_PATH_LENGTH];
4107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108 res = (struct ipr_resource_entry *)sdev->hostdata;
4109 if (res && ioa_cfg->sis64)
4110 len = snprintf(buf, PAGE_SIZE, "%s\n",
4111 ipr_format_res_path(res->res_path, buffer,
4112 sizeof(buffer)));
4113 else if (res)
4114 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4115 res->bus, res->target, res->lun);
4117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118 return len;
4121 static struct device_attribute ipr_resource_path_attr = {
4122 .attr = {
4123 .name = "resource_path",
4124 .mode = S_IRUSR,
4125 },
4126 .show = ipr_show_resource_path
4127 };
4129 static struct device_attribute *ipr_dev_attrs[] = {
4130 &ipr_adapter_handle_attr,
4131 &ipr_resource_path_attr,
4132 NULL,
4133 };
4136 * ipr_biosparam - Return the HSC mapping
4137 * @sdev: scsi device struct
4138 * @block_device: block device pointer
4139 * @capacity: capacity of the device
4140 * @parm: Array containing returned HSC values.
4142 * This function generates the HSC parms that fdisk uses.
4143 * We want to make sure we return something that places partitions
4144 * on 4k boundaries for best performance with the IOA.
4146 * Return value:
4147 * 0 on success
4149 static int ipr_biosparam(struct scsi_device *sdev,
4150 struct block_device *block_device,
4151 sector_t capacity, int *parm)
4153 int heads, sectors;
4154 sector_t cylinders;
4156 heads = 128;
4157 sectors = 32;
4159 cylinders = capacity;
4160 sector_div(cylinders, (128 * 32));
4162 /* return result */
4163 parm[0] = heads;
4164 parm[1] = sectors;
4165 parm[2] = cylinders;
4167 return 0;
4168 }
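/*
 * [Illustrative sketch -- not part of ipr.c] With 128 heads and 32
 * sectors per track, one "cylinder" is 128 * 32 = 4096 sectors, i.e.
 * 2 MiB of 512-byte sectors, so cylinder-aligned partitions always
 * start on a 4k boundary. The same computation, standalone:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 143374000ULL;	/* example disk, in sectors */
	int heads = 128, sectors = 32;
	uint64_t cylinders = capacity / (uint64_t)(heads * sectors);

	printf("H/S/C = %d/%d/%llu, %d sectors (%d KiB) per cylinder\n",
	       heads, sectors, (unsigned long long)cylinders,
	       heads * sectors, heads * sectors * 512 / 1024);
	return 0;
}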
4171 * ipr_find_starget - Find target based on bus/target.
4172 * @starget: scsi target struct
4174 * Return value:
4175 * resource entry pointer if found / NULL if not found
4177 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4179 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4180 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4181 struct ipr_resource_entry *res;
4183 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4184 if ((res->bus == starget->channel) &&
4185 (res->target == starget->id) &&
4186 (res->lun == 0)) {
4187 return res;
4191 return NULL;
4194 static struct ata_port_info sata_port_info;
4197 * ipr_target_alloc - Prepare for commands to a SCSI target
4198 * @starget: scsi target struct
4200 * If the device is a SATA device, this function allocates an
4201 * ATA port with libata, else it does nothing.
4203 * Return value:
4204 * 0 on success / non-0 on failure
4206 static int ipr_target_alloc(struct scsi_target *starget)
4208 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4210 struct ipr_sata_port *sata_port;
4211 struct ata_port *ap;
4212 struct ipr_resource_entry *res;
4213 unsigned long lock_flags;
4215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4216 res = ipr_find_starget(starget);
4217 starget->hostdata = NULL;
4219 if (res && ipr_is_gata(res)) {
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4221 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4222 if (!sata_port)
4223 return -ENOMEM;
4225 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4226 if (ap) {
4227 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4228 sata_port->ioa_cfg = ioa_cfg;
4229 sata_port->ap = ap;
4230 sata_port->res = res;
4232 res->sata_port = sata_port;
4233 ap->private_data = sata_port;
4234 starget->hostdata = sata_port;
4235 } else {
4236 kfree(sata_port);
4237 return -ENOMEM;
4240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4242 return 0;
4246 * ipr_target_destroy - Destroy a SCSI target
4247 * @starget: scsi target struct
4249 * If the device was a SATA device, this function frees the libata
4250 * ATA port, else it does nothing.
4253 static void ipr_target_destroy(struct scsi_target *starget)
4255 struct ipr_sata_port *sata_port = starget->hostdata;
4256 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4257 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4259 if (ioa_cfg->sis64) {
4260 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4261 clear_bit(starget->id, ioa_cfg->array_ids);
4262 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4263 clear_bit(starget->id, ioa_cfg->vset_ids);
4264 else if (starget->channel == 0)
4265 clear_bit(starget->id, ioa_cfg->target_ids);
4268 if (sata_port) {
4269 starget->hostdata = NULL;
4270 ata_sas_port_destroy(sata_port->ap);
4271 kfree(sata_port);
4276 * ipr_find_sdev - Find device based on bus/target/lun.
4277 * @sdev: scsi device struct
4279 * Return value:
4280 * resource entry pointer if found / NULL if not found
4282 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4285 struct ipr_resource_entry *res;
4287 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4288 if ((res->bus == sdev->channel) &&
4289 (res->target == sdev->id) &&
4290 (res->lun == sdev->lun))
4291 return res;
4294 return NULL;
4298 * ipr_slave_destroy - Unconfigure a SCSI device
4299 * @sdev: scsi device struct
4301 * Return value:
4302 * nothing
4304 static void ipr_slave_destroy(struct scsi_device *sdev)
4306 struct ipr_resource_entry *res;
4307 struct ipr_ioa_cfg *ioa_cfg;
4308 unsigned long lock_flags = 0;
4310 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4312 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4313 res = (struct ipr_resource_entry *) sdev->hostdata;
4314 if (res) {
4315 if (res->sata_port)
4316 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4317 sdev->hostdata = NULL;
4318 res->sdev = NULL;
4319 res->sata_port = NULL;
4321 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4325 * ipr_slave_configure - Configure a SCSI device
4326 * @sdev: scsi device struct
4328 * This function configures the specified scsi device.
4330 * Return value:
4331 * 0 on success
4333 static int ipr_slave_configure(struct scsi_device *sdev)
4335 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4336 struct ipr_resource_entry *res;
4337 struct ata_port *ap = NULL;
4338 unsigned long lock_flags = 0;
4339 char buffer[IPR_MAX_RES_PATH_LENGTH];
4341 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4342 res = sdev->hostdata;
4343 if (res) {
4344 if (ipr_is_af_dasd_device(res))
4345 sdev->type = TYPE_RAID;
4346 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4347 sdev->scsi_level = 4;
4348 sdev->no_uld_attach = 1;
4350 if (ipr_is_vset_device(res)) {
4351 blk_queue_rq_timeout(sdev->request_queue,
4352 IPR_VSET_RW_TIMEOUT);
4353 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4355 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4356 sdev->allow_restart = 1;
4357 if (ipr_is_gata(res) && res->sata_port)
4358 ap = res->sata_port->ap;
4359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4361 if (ap) {
4362 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4363 ata_sas_slave_configure(sdev, ap);
4364 } else
4365 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4366 if (ioa_cfg->sis64)
4367 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4368 ipr_format_res_path(res->res_path, buffer,
4369 sizeof(buffer)));
4370 return 0;
4372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4373 return 0;
4377 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4378 * @sdev: scsi device struct
4380 * This function initializes an ATA port so that future commands
4381 * sent through queuecommand will work.
4383 * Return value:
4384 * 0 on success
4386 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4388 struct ipr_sata_port *sata_port = NULL;
4389 int rc = -ENXIO;
4391 ENTER;
4392 if (sdev->sdev_target)
4393 sata_port = sdev->sdev_target->hostdata;
4394 if (sata_port)
4395 rc = ata_sas_port_init(sata_port->ap);
4396 if (rc)
4397 ipr_slave_destroy(sdev);
4399 LEAVE;
4400 return rc;
4404 * ipr_slave_alloc - Prepare for commands to a device.
4405 * @sdev: scsi device struct
4407 * This function saves a pointer to the resource entry
4408 * in the scsi device struct if the device exists. We
4409 * can then use this pointer in ipr_queuecommand when
4410 * handling new commands.
4412 * Return value:
4413 * 0 on success / -ENXIO if device does not exist
4415 static int ipr_slave_alloc(struct scsi_device *sdev)
4417 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4418 struct ipr_resource_entry *res;
4419 unsigned long lock_flags;
4420 int rc = -ENXIO;
4422 sdev->hostdata = NULL;
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4426 res = ipr_find_sdev(sdev);
4427 if (res) {
4428 res->sdev = sdev;
4429 res->add_to_ml = 0;
4430 res->in_erp = 0;
4431 sdev->hostdata = res;
4432 if (!ipr_is_naca_model(res))
4433 res->needs_sync_complete = 1;
4434 rc = 0;
4435 if (ipr_is_gata(res)) {
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437 return ipr_ata_slave_alloc(sdev);
4441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4443 return rc;
4447 * ipr_eh_host_reset - Reset the host adapter
4448 * @scsi_cmd: scsi command struct
4450 * Return value:
4451 * SUCCESS / FAILED
4453 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4455 struct ipr_ioa_cfg *ioa_cfg;
4456 int rc;
4458 ENTER;
4459 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4461 dev_err(&ioa_cfg->pdev->dev,
4462 "Adapter being reset as a result of error recovery.\n");
4464 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4465 ioa_cfg->sdt_state = GET_DUMP;
4467 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4469 LEAVE;
4470 return rc;
4473 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4475 int rc;
4477 spin_lock_irq(cmd->device->host->host_lock);
4478 rc = __ipr_eh_host_reset(cmd);
4479 spin_unlock_irq(cmd->device->host->host_lock);
4481 return rc;
4485 * ipr_device_reset - Reset the device
4486 * @ioa_cfg: ioa config struct
4487 * @res: resource entry struct
4489 * This function issues a device reset to the affected device.
4490 * If the device is a SCSI device, a LUN reset will be sent
4491 * to the device first. If that does not work, a target reset
4492 * will be sent. If the device is a SATA device, a PHY reset will
4493 * be sent.
4495 * Return value:
4496 * 0 on success / non-zero on failure
4498 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4499 struct ipr_resource_entry *res)
4501 struct ipr_cmnd *ipr_cmd;
4502 struct ipr_ioarcb *ioarcb;
4503 struct ipr_cmd_pkt *cmd_pkt;
4504 struct ipr_ioarcb_ata_regs *regs;
4505 u32 ioasc;
4507 ENTER;
4508 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4509 ioarcb = &ipr_cmd->ioarcb;
4510 cmd_pkt = &ioarcb->cmd_pkt;
4512 if (ipr_cmd->ioa_cfg->sis64) {
4513 regs = &ipr_cmd->i.ata_ioadl.regs;
4514 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4515 } else
4516 regs = &ioarcb->u.add_data.u.regs;
4518 ioarcb->res_handle = res->res_handle;
4519 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4520 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4521 if (ipr_is_gata(res)) {
4522 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4523 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4524 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4527 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4528 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4529 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4530 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4531 if (ipr_cmd->ioa_cfg->sis64)
4532 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4533 sizeof(struct ipr_ioasa_gata));
4534 else
4535 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4536 sizeof(struct ipr_ioasa_gata));
4539 LEAVE;
4540 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4544 * ipr_sata_reset - Reset the SATA port
4545 * @link: SATA link to reset
4546 * @classes: class of the attached device
4548 * This function issues a SATA phy reset to the affected ATA link.
4550 * Return value:
4551 * 0 on success / non-zero on failure
4553 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4554 unsigned long deadline)
4556 struct ipr_sata_port *sata_port = link->ap->private_data;
4557 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4558 struct ipr_resource_entry *res;
4559 unsigned long lock_flags = 0;
4560 int rc = -ENXIO;
4562 ENTER;
4563 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4564 while (ioa_cfg->in_reset_reload) {
4565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4566 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4567 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4568 }
4570 res = sata_port->res;
4571 if (res) {
4572 rc = ipr_device_reset(ioa_cfg, res);
4573 *classes = res->ata_class;
4576 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4577 LEAVE;
4578 return rc;
4582 * ipr_eh_dev_reset - Reset the device
4583 * @scsi_cmd: scsi command struct
4585 * This function issues a device reset to the affected device.
4586 * A LUN reset will be sent to the device first. If that does
4587 * not work, a target reset will be sent.
4589 * Return value:
4590 * SUCCESS / FAILED
4592 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4594 struct ipr_cmnd *ipr_cmd;
4595 struct ipr_ioa_cfg *ioa_cfg;
4596 struct ipr_resource_entry *res;
4597 struct ata_port *ap;
4598 int rc = 0;
4600 ENTER;
4601 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4602 res = scsi_cmd->device->hostdata;
4604 if (!res)
4605 return FAILED;
4608 * If we are currently going through reset/reload, return failed. This will force the
4609 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4610 * reset to complete
4612 if (ioa_cfg->in_reset_reload)
4613 return FAILED;
4614 if (ioa_cfg->ioa_is_dead)
4615 return FAILED;
4617 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4618 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4619 if (ipr_cmd->scsi_cmd)
4620 ipr_cmd->done = ipr_scsi_eh_done;
4621 if (ipr_cmd->qc)
4622 ipr_cmd->done = ipr_sata_eh_done;
4623 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4624 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4625 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4630 res->resetting_device = 1;
4631 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4633 if (ipr_is_gata(res) && res->sata_port) {
4634 ap = res->sata_port->ap;
4635 spin_unlock_irq(scsi_cmd->device->host->host_lock);
4636 ata_std_error_handler(ap);
4637 spin_lock_irq(scsi_cmd->device->host->host_lock);
4639 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4640 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4641 rc = -EIO;
4642 break;
4645 } else
4646 rc = ipr_device_reset(ioa_cfg, res);
4647 res->resetting_device = 0;
4649 LEAVE;
4650 return (rc ? FAILED : SUCCESS);
4653 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4655 int rc;
4657 spin_lock_irq(cmd->device->host->host_lock);
4658 rc = __ipr_eh_dev_reset(cmd);
4659 spin_unlock_irq(cmd->device->host->host_lock);
4661 return rc;
4665 * ipr_bus_reset_done - Op done function for bus reset.
4666 * @ipr_cmd: ipr command struct
4668 * This function is the op done function for a bus reset
4670 * Return value:
4671 * none
4673 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4676 struct ipr_resource_entry *res;
4678 ENTER;
4679 if (!ioa_cfg->sis64)
4680 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4681 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4682 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4683 break;
4688 * If abort has not completed, indicate the reset has, else call the
4689 * abort's done function to wake the sleeping eh thread
4691 if (ipr_cmd->sibling->sibling)
4692 ipr_cmd->sibling->sibling = NULL;
4693 else
4694 ipr_cmd->sibling->done(ipr_cmd->sibling);
4696 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4697 LEAVE;
4701 * ipr_abort_timeout - An abort task has timed out
4702 * @ipr_cmd: ipr command struct
4704 * This function handles when an abort task times out. If this
4705 * happens we issue a bus reset since we have resources tied
4706 * up that must be freed before returning to the midlayer.
4708 * Return value:
4709 * none
4711 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4713 struct ipr_cmnd *reset_cmd;
4714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4715 struct ipr_cmd_pkt *cmd_pkt;
4716 unsigned long lock_flags = 0;
4718 ENTER;
4719 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4720 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722 return;
4725 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4726 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4727 ipr_cmd->sibling = reset_cmd;
4728 reset_cmd->sibling = ipr_cmd;
4729 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4730 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4731 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4732 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4733 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4735 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4737 LEAVE;
4741 * ipr_cancel_op - Cancel specified op
4742 * @scsi_cmd: scsi command struct
4744 * This function cancels specified op.
4746 * Return value:
4747 * SUCCESS / FAILED
4749 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4751 struct ipr_cmnd *ipr_cmd;
4752 struct ipr_ioa_cfg *ioa_cfg;
4753 struct ipr_resource_entry *res;
4754 struct ipr_cmd_pkt *cmd_pkt;
4755 u32 ioasc;
4756 int op_found = 0;
4758 ENTER;
4759 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4760 res = scsi_cmd->device->hostdata;
4762 /* If we are currently going through reset/reload, return failed.
4763 * This will force the mid-layer to call ipr_eh_host_reset,
4764 * which will then go to sleep and wait for the reset to complete
4766 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4767 return FAILED;
4768 if (!res || !ipr_is_gscsi(res))
4769 return FAILED;
4771 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4772 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4773 ipr_cmd->done = ipr_scsi_eh_done;
4774 op_found = 1;
4775 break;
4779 if (!op_found)
4780 return SUCCESS;
4782 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4783 ipr_cmd->ioarcb.res_handle = res->res_handle;
4784 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4785 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4786 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4787 ipr_cmd->u.sdev = scsi_cmd->device;
4789 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4790 scsi_cmd->cmnd[0]);
4791 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4792 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4795 * If the abort task timed out and we sent a bus reset, we will get
4796 * one of the following responses to the abort
4798 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4799 ioasc = 0;
4800 ipr_trace;
4803 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4804 if (!ipr_is_naca_model(res))
4805 res->needs_sync_complete = 1;
4807 LEAVE;
4808 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4812 * ipr_eh_abort - Abort a single op
4813 * @scsi_cmd: scsi command struct
4815 * Return value:
4816 * SUCCESS / FAILED
4818 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4820 unsigned long flags;
4821 int rc;
4823 ENTER;
4825 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4826 rc = ipr_cancel_op(scsi_cmd);
4827 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4829 LEAVE;
4830 return rc;
4834 * ipr_handle_other_interrupt - Handle "other" interrupts
4835 * @ioa_cfg: ioa config struct
4837 * Return value:
4838 * IRQ_NONE / IRQ_HANDLED
4840 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4842 irqreturn_t rc = IRQ_HANDLED;
4843 volatile u32 int_reg, int_mask_reg;
4845 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4848 /* If an interrupt on the adapter did not occur, ignore it;
4849 * in the SIS 64 case, also check for a stage change interrupt.
4850 */
4851 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4852 if (ioa_cfg->sis64) {
4853 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4854 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4855 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4857 /* clear stage change */
4858 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4859 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4860 list_del(&ioa_cfg->reset_cmd->queue);
4861 del_timer(&ioa_cfg->reset_cmd->timer);
4862 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4863 return IRQ_HANDLED;
4867 return IRQ_NONE;
4870 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4871 /* Mask the interrupt */
4872 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4874 /* Clear the interrupt */
4875 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4876 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4878 list_del(&ioa_cfg->reset_cmd->queue);
4879 del_timer(&ioa_cfg->reset_cmd->timer);
4880 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4881 } else {
4882 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4883 ioa_cfg->ioa_unit_checked = 1;
4884 else
4885 dev_err(&ioa_cfg->pdev->dev,
4886 "Permanent IOA failure. 0x%08X\n", int_reg);
4888 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4889 ioa_cfg->sdt_state = GET_DUMP;
4891 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4892 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4895 return rc;
4899 * ipr_isr_eh - Interrupt service routine error handler
4900 * @ioa_cfg: ioa config struct
4901 * @msg: message to log
4903 * Return value:
4904 * none
4906 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4908 ioa_cfg->errors_logged++;
4909 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4911 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4912 ioa_cfg->sdt_state = GET_DUMP;
4914 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4918 * ipr_isr - Interrupt service routine
4919 * @irq: irq number
4920 * @devp: pointer to ioa config struct
4922 * Return value:
4923 * IRQ_NONE / IRQ_HANDLED
4925 static irqreturn_t ipr_isr(int irq, void *devp)
4927 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4928 unsigned long lock_flags = 0;
4929 volatile u32 int_reg;
4930 u32 ioasc;
4931 u16 cmd_index;
4932 int num_hrrq = 0;
4933 struct ipr_cmnd *ipr_cmd;
4934 irqreturn_t rc = IRQ_NONE;
4936 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4938 /* If interrupts are disabled, ignore the interrupt */
4939 if (!ioa_cfg->allow_interrupts) {
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941 return IRQ_NONE;
4944 while (1) {
4945 ipr_cmd = NULL;
4947 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4948 ioa_cfg->toggle_bit) {
4950 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4951 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4953 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4954 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956 return IRQ_HANDLED;
4959 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4961 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4963 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4965 list_del(&ipr_cmd->queue);
4966 del_timer(&ipr_cmd->timer);
4967 ipr_cmd->done(ipr_cmd);
4969 rc = IRQ_HANDLED;
4971 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4972 ioa_cfg->hrrq_curr++;
4973 } else {
4974 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4975 ioa_cfg->toggle_bit ^= 1u;
4976 }
4977 }
4979 if (ipr_cmd != NULL) {
4980 /* Clear the PCI interrupt */
4981 do {
4982 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4983 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
4984 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4985 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4987 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4988 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4989 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4990 return IRQ_HANDLED;
4993 } else
4994 break;
4997 if (unlikely(rc == IRQ_NONE))
4998 rc = ipr_handle_other_interrupt(ioa_cfg);
5000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5001 return rc;
5002 }
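/*
 * [Illustrative sketch -- not part of ipr.c] The inner loop of ipr_isr
 * consumes a response ring in which the adapter flips a toggle bit each
 * time it wraps: an entry is new while its toggle bit matches the value
 * the host expects, and after the host wraps it flips its expectation.
 * Over a zeroed ring the consumer starts by expecting 1. A standalone
 * model (bit layout invented):
 */
#include <stdint.h>
#include <stdbool.h>

#define RING_SIZE  64
#define TOGGLE_BIT 0x1u

struct resp_ring {
	volatile uint32_t entry[RING_SIZE];
	unsigned int cur;		/* consumer index */
	uint32_t toggle;		/* expected toggle value, starts at 1 */
};

/* Returns true and yields the response handle if a new entry is ready. */
static bool ring_consume(struct resp_ring *r, uint32_t *handle)
{
	uint32_t e = r->entry[r->cur];

	if ((e & TOGGLE_BIT) != r->toggle)
		return false;		/* adapter has not produced it yet */

	*handle = e >> 1;		/* payload sits above the toggle bit */
	if (++r->cur == RING_SIZE) {	/* wrapped: expect the flipped bit */
		r->cur = 0;
		r->toggle ^= 1u;
	}
	return true;
}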
5005 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5006 * @ioa_cfg: ioa config struct
5007 * @ipr_cmd: ipr command struct
5009 * Return value:
5010 * 0 on success / -1 on failure
5012 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5013 struct ipr_cmnd *ipr_cmd)
5015 int i, nseg;
5016 struct scatterlist *sg;
5017 u32 length;
5018 u32 ioadl_flags = 0;
5019 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5020 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5021 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5023 length = scsi_bufflen(scsi_cmd);
5024 if (!length)
5025 return 0;
5027 nseg = scsi_dma_map(scsi_cmd);
5028 if (nseg < 0) {
5029 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5030 return -1;
5033 ipr_cmd->dma_use_sg = nseg;
5035 ioarcb->data_transfer_length = cpu_to_be32(length);
5036 ioarcb->ioadl_len =
5037 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5039 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5040 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5041 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5042 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5043 ioadl_flags = IPR_IOADL_FLAGS_READ;
5045 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5046 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5047 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5048 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5049 }
5051 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5052 return 0;
5053 }
5056 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5057 * @ioa_cfg: ioa config struct
5058 * @ipr_cmd: ipr command struct
5060 * Return value:
5061 * 0 on success / -1 on failure
5063 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5064 struct ipr_cmnd *ipr_cmd)
5066 int i, nseg;
5067 struct scatterlist *sg;
5068 u32 length;
5069 u32 ioadl_flags = 0;
5070 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5071 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5072 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5074 length = scsi_bufflen(scsi_cmd);
5075 if (!length)
5076 return 0;
5078 nseg = scsi_dma_map(scsi_cmd);
5079 if (nseg < 0) {
5080 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5081 return -1;
5084 ipr_cmd->dma_use_sg = nseg;
5086 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5087 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5088 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5089 ioarcb->data_transfer_length = cpu_to_be32(length);
5090 ioarcb->ioadl_len =
5091 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5092 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5093 ioadl_flags = IPR_IOADL_FLAGS_READ;
5094 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5095 ioarcb->read_ioadl_len =
5096 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5099 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5100 ioadl = ioarcb->u.add_data.u.ioadl;
5101 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5102 offsetof(struct ipr_ioarcb, u.add_data));
5103 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5106 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5107 ioadl[i].flags_and_data_len =
5108 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5109 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5110 }
5112 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5113 return 0;
5114 }
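/*
 * [Illustrative sketch -- not part of ipr.c] A design note on the
 * 32-bit path above: when the S/G list is short enough to fit inside
 * ioarcb->u.add_data.u.ioadl, the descriptors ride along in the request
 * block itself and write_ioadl_addr points back into the IOARCB,
 * sparing the adapter a second DMA fetch. The choice in miniature
 * (capacity and names hypothetical):
 */
#include <stdint.h>

struct mini_desc { uint32_t flags_len; uint32_t addr; };

#define INLINE_DESCS 3			/* hypothetical inline capacity */

struct mini_req {
	uint64_t list_bus_addr;		/* where the adapter fetches the list */
	struct mini_desc inline_list[INLINE_DESCS];
};

static struct mini_desc *pick_list(struct mini_req *rq,
				   struct mini_desc *external, int nseg)
{
	/* short lists live in the request; long ones live outside it */
	return (nseg <= INLINE_DESCS) ? rq->inline_list : external;
}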
5117 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5118 * @scsi_cmd: scsi command struct
5120 * Return value:
5121 * task attributes
5123 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5125 u8 tag[2];
5126 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5128 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5129 switch (tag[0]) {
5130 case MSG_SIMPLE_TAG:
5131 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5132 break;
5133 case MSG_HEAD_TAG:
5134 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5135 break;
5136 case MSG_ORDERED_TAG:
5137 rc = IPR_FLAGS_LO_ORDERED_TASK;
5138 break;
5142 return rc;
5146 * ipr_erp_done - Process completion of ERP for a device
5147 * @ipr_cmd: ipr command struct
5149 * This function copies the sense buffer into the scsi_cmd
5150 * struct and pushes the scsi_done function.
5152 * Return value:
5153 * nothing
5155 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5157 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5158 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5160 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5162 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5163 scsi_cmd->result |= (DID_ERROR << 16);
5164 scmd_printk(KERN_ERR, scsi_cmd,
5165 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5166 } else {
5167 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5168 SCSI_SENSE_BUFFERSIZE);
5171 if (res) {
5172 if (!ipr_is_naca_model(res))
5173 res->needs_sync_complete = 1;
5174 res->in_erp = 0;
5176 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5177 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5178 scsi_cmd->scsi_done(scsi_cmd);
5182 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5183 * @ipr_cmd: ipr command struct
5185 * Return value:
5186 * none
5188 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5190 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5191 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5192 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5194 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5195 ioarcb->data_transfer_length = 0;
5196 ioarcb->read_data_transfer_length = 0;
5197 ioarcb->ioadl_len = 0;
5198 ioarcb->read_ioadl_len = 0;
5199 ioasa->hdr.ioasc = 0;
5200 ioasa->hdr.residual_data_len = 0;
5202 if (ipr_cmd->ioa_cfg->sis64)
5203 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5204 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5205 else {
5206 ioarcb->write_ioadl_addr =
5207 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5208 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5213 * ipr_erp_request_sense - Send request sense to a device
5214 * @ipr_cmd: ipr command struct
5216 * This function sends a request sense to a device as a result
5217 * of a check condition.
5219 * Return value:
5220 * nothing
5222 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5224 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5225 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5227 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5228 ipr_erp_done(ipr_cmd);
5229 return;
5232 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5234 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5235 cmd_pkt->cdb[0] = REQUEST_SENSE;
5236 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5237 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5238 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5239 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5241 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5242 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5244 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5245 IPR_REQUEST_SENSE_TIMEOUT * 2);
5249 * ipr_erp_cancel_all - Send cancel all to a device
5250 * @ipr_cmd: ipr command struct
5252 * This function sends a cancel all to a device to clear the
5253 * queue. If we are running TCQ on the device, QERR is set to 1,
5254 * which means all outstanding ops have been dropped on the floor.
5255 * Cancel all will return them to us.
5257 * Return value:
5258 * nothing
5260 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5262 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5263 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5264 struct ipr_cmd_pkt *cmd_pkt;
5266 res->in_erp = 1;
5268 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5270 if (!scsi_get_tag_type(scsi_cmd->device)) {
5271 ipr_erp_request_sense(ipr_cmd);
5272 return;
5275 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5276 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5277 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5279 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5280 IPR_CANCEL_ALL_TIMEOUT);
5284 * ipr_dump_ioasa - Dump contents of IOASA
5285 * @ioa_cfg: ioa config struct
5286 * @ipr_cmd: ipr command struct
5287 * @res: resource entry struct
5289 * This function is invoked by the interrupt handler when ops
5290 * fail. It will log the IOASA if appropriate. Only called
5291 * for GPDD ops.
5293 * Return value:
5294 * none
5296 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5297 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5299 int i;
5300 u16 data_len;
5301 u32 ioasc, fd_ioasc;
5302 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5303 __be32 *ioasa_data = (__be32 *)ioasa;
5304 int error_index;
5306 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5307 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5309 if (0 == ioasc)
5310 return;
5312 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5313 return;
5315 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5316 error_index = ipr_get_error(fd_ioasc);
5317 else
5318 error_index = ipr_get_error(ioasc);
5320 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5321 /* Don't log an error if the IOA already logged one */
5322 if (ioasa->hdr.ilid != 0)
5323 return;
5325 if (!ipr_is_gscsi(res))
5326 return;
5328 if (ipr_error_table[error_index].log_ioasa == 0)
5329 return;
5332 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5334 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5335 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5336 data_len = sizeof(struct ipr_ioasa64);
5337 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5338 data_len = sizeof(struct ipr_ioasa);
5340 ipr_err("IOASA Dump:\n");
5342 for (i = 0; i < data_len / 4; i += 4) {
5343 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5344 be32_to_cpu(ioasa_data[i]),
5345 be32_to_cpu(ioasa_data[i+1]),
5346 be32_to_cpu(ioasa_data[i+2]),
5347 be32_to_cpu(ioasa_data[i+3]));
5352 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5353 * @ipr_cmd: ipr command struct
5356 * Return value:
5357 * none
5359 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5361 u32 failing_lba;
5362 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5363 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5364 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5365 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5367 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5369 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5370 return;
5372 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5374 if (ipr_is_vset_device(res) &&
5375 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5376 ioasa->u.vset.failing_lba_hi != 0) {
5377 sense_buf[0] = 0x72;
5378 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5379 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5380 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5382 sense_buf[7] = 12;
5383 sense_buf[8] = 0;
5384 sense_buf[9] = 0x0A;
5385 sense_buf[10] = 0x80;
5387 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5389 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5390 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5391 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5392 sense_buf[15] = failing_lba & 0x000000ff;
5394 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5396 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5397 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5398 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5399 sense_buf[19] = failing_lba & 0x000000ff;
5400 } else {
5401 sense_buf[0] = 0x70;
5402 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5403 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5404 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5406 /* Illegal request */
5407 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5408 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5409 sense_buf[7] = 10; /* additional length */
5411 /* IOARCB was in error */
5412 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5413 sense_buf[15] = 0xC0;
5414 else /* Parameter data was invalid */
5415 sense_buf[15] = 0x80;
5417 sense_buf[16] =
5418 ((IPR_FIELD_POINTER_MASK &
5419 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5420 sense_buf[17] =
5421 (IPR_FIELD_POINTER_MASK &
5422 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5423 } else {
5424 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5425 if (ipr_is_vset_device(res))
5426 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5427 else
5428 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5430 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5431 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5432 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5433 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5434 sense_buf[6] = failing_lba & 0x000000ff;
5435 }
5437 sense_buf[7] = 6; /* additional length */
5438 }
5439 }
5440 }
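/*
 * [Illustrative sketch -- not part of ipr.c] The vset branch above uses
 * descriptor-format sense (response code 0x72) because an information
 * descriptor can carry a 64-bit failing LBA, while fixed-format 0x70
 * sense only has bytes 3-6 for a 32-bit value. Filling in that layout,
 * standalone (caller supplies a buffer of at least 20 bytes):
 */
#include <stdint.h>
#include <string.h>

static void fill_desc_format_sense(uint8_t *sb, uint8_t key, uint8_t asc,
				   uint8_t ascq, uint64_t failing_lba)
{
	int i;

	memset(sb, 0, 20);
	sb[0] = 0x72;		/* descriptor format, current error */
	sb[1] = key;
	sb[2] = asc;
	sb[3] = ascq;
	sb[7] = 12;		/* additional length: one 12-byte descriptor */

	sb[8] = 0x00;		/* descriptor type: information */
	sb[9] = 0x0A;		/* descriptor length */
	sb[10] = 0x80;		/* VALID bit */
	for (i = 0; i < 8; i++)	/* big-endian LBA in bytes 12..19 */
		sb[12 + i] = (uint8_t)(failing_lba >> (56 - 8 * i));
}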
5443 * ipr_get_autosense - Copy autosense data to sense buffer
5444 * @ipr_cmd: ipr command struct
5446 * This function copies the autosense buffer to the buffer
5447 * in the scsi_cmd, if there is autosense available.
5449 * Return value:
5450 * 1 if autosense was available / 0 if not
5452 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5454 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5455 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5457 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5458 return 0;
5460 if (ipr_cmd->ioa_cfg->sis64)
5461 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5462 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5463 SCSI_SENSE_BUFFERSIZE));
5464 else
5465 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5466 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5467 SCSI_SENSE_BUFFERSIZE));
5468 return 1;
5472 * ipr_erp_start - Process an error response for a SCSI op
5473 * @ioa_cfg: ioa config struct
5474 * @ipr_cmd: ipr command struct
5476 * This function determines whether or not to initiate ERP
5477 * on the affected device.
5479 * Return value:
5480 * nothing
5482 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5483 struct ipr_cmnd *ipr_cmd)
5485 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5486 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5487 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5488 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5490 if (!res) {
5491 ipr_scsi_eh_done(ipr_cmd);
5492 return;
5495 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5496 ipr_gen_sense(ipr_cmd);
5498 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5500 switch (masked_ioasc) {
5501 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5502 if (ipr_is_naca_model(res))
5503 scsi_cmd->result |= (DID_ABORT << 16);
5504 else
5505 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5506 break;
5507 case IPR_IOASC_IR_RESOURCE_HANDLE:
5508 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5509 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5510 break;
5511 case IPR_IOASC_HW_SEL_TIMEOUT:
5512 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5513 if (!ipr_is_naca_model(res))
5514 res->needs_sync_complete = 1;
5515 break;
5516 case IPR_IOASC_SYNC_REQUIRED:
5517 if (!res->in_erp)
5518 res->needs_sync_complete = 1;
5519 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5520 break;
5521 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5522 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5523 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5524 break;
5525 case IPR_IOASC_BUS_WAS_RESET:
5526 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5528 * Report the bus reset and ask for a retry. The device
5529 * will return a check condition / unit attention on the next command.
5531 if (!res->resetting_device)
5532 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5533 scsi_cmd->result |= (DID_ERROR << 16);
5534 if (!ipr_is_naca_model(res))
5535 res->needs_sync_complete = 1;
5536 break;
5537 case IPR_IOASC_HW_DEV_BUS_STATUS:
5538 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5539 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5540 if (!ipr_get_autosense(ipr_cmd)) {
5541 if (!ipr_is_naca_model(res)) {
5542 ipr_erp_cancel_all(ipr_cmd);
5543 return;
5547 if (!ipr_is_naca_model(res))
5548 res->needs_sync_complete = 1;
5549 break;
5550 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5551 break;
5552 default:
5553 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5554 scsi_cmd->result |= (DID_ERROR << 16);
5555 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5556 res->needs_sync_complete = 1;
5557 break;
5560 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5561 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5562 scsi_cmd->scsi_done(scsi_cmd);
5566 * ipr_scsi_done - mid-layer done function
5567 * @ipr_cmd: ipr command struct
5569 * This function is invoked by the interrupt handler for
5570 * ops generated by the SCSI mid-layer
5572 * Return value:
5573 * none
5575 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5578 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5579 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5581 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5583 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5584 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5585 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5586 scsi_cmd->scsi_done(scsi_cmd);
5587 } else
5588 ipr_erp_start(ioa_cfg, ipr_cmd);
5592 * ipr_queuecommand - Queue a mid-layer request
5593 * @scsi_cmd: scsi command struct
5594 * @done: done function
5596 * This function queues a request generated by the mid-layer.
5598 * Return value:
5599 * 0 on success
5600 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5601 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5603 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5604 void (*done) (struct scsi_cmnd *))
5606 struct ipr_ioa_cfg *ioa_cfg;
5607 struct ipr_resource_entry *res;
5608 struct ipr_ioarcb *ioarcb;
5609 struct ipr_cmnd *ipr_cmd;
5610 int rc = 0;
5612 scsi_cmd->scsi_done = done;
5613 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5614 res = scsi_cmd->device->hostdata;
5615 scsi_cmd->result = (DID_OK << 16);
5617 /*
5618 * We are currently blocking all devices due to a host reset.
5619 * We have told the host to stop giving us new requests, but
5620 * ERP ops don't count. FIXME
5621 */
5622 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5623 return SCSI_MLQUEUE_HOST_BUSY;
5626 * FIXME - Create scsi_set_host_offline interface
5627 * and the ioa_is_dead check can be removed
5629 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5630 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5631 scsi_cmd->result = (DID_NO_CONNECT << 16);
5632 scsi_cmd->scsi_done(scsi_cmd);
5633 return 0;
5636 if (ipr_is_gata(res) && res->sata_port)
5637 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5639 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5640 ioarcb = &ipr_cmd->ioarcb;
5641 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5643 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5644 ipr_cmd->scsi_cmd = scsi_cmd;
5645 ioarcb->res_handle = res->res_handle;
5646 ipr_cmd->done = ipr_scsi_done;
5647 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5649 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5650 if (scsi_cmd->underflow == 0)
5651 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5653 if (res->needs_sync_complete) {
5654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5655 res->needs_sync_complete = 0;
5658 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5659 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5660 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5661 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5664 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5665 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5666 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5668 if (likely(rc == 0)) {
5669 if (ioa_cfg->sis64)
5670 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5671 else
5672 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5675 if (likely(rc == 0)) {
5676 mb();
5677 ipr_send_command(ipr_cmd);
5678 } else {
5679 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5680 return SCSI_MLQUEUE_HOST_BUSY;
5683 return 0;
5684 }
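/*
 * [Illustrative sketch -- not part of ipr.c] The mb() just before
 * ipr_send_command above makes the IOARCB and IOADL stores visible
 * before the MMIO write that tells the adapter to fetch them. The
 * general shape of the pattern, with a GCC full barrier standing in
 * for the kernel's mb()/writel:
 */
#include <stdint.h>

static inline void post_command(volatile uint32_t *doorbell_reg,
				uint32_t cmd_bus_addr)
{
	__sync_synchronize();		/* order descriptor writes first */
	*doorbell_reg = cmd_bus_addr;	/* device may now fetch the command */
}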
5687 * ipr_ioctl - IOCTL handler
5688 * @sdev: scsi device struct
5689 * @cmd: IOCTL cmd
5690 * @arg: IOCTL arg
5692 * Return value:
5693 * 0 on success / other on failure
5695 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5697 struct ipr_resource_entry *res;
5699 res = (struct ipr_resource_entry *)sdev->hostdata;
5700 if (res && ipr_is_gata(res)) {
5701 if (cmd == HDIO_GET_IDENTITY)
5702 return -ENOTTY;
5703 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5706 return -EINVAL;
5710 * ipr_ioa_info - Get information about the card/driver
5711 * @host: scsi host struct
5713 * Return value:
5714 * pointer to buffer with description string
5716 static const char *ipr_ioa_info(struct Scsi_Host *host)
5718 static char buffer[512];
5719 struct ipr_ioa_cfg *ioa_cfg;
5720 unsigned long lock_flags = 0;
5722 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5724 spin_lock_irqsave(host->host_lock, lock_flags);
5725 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5726 spin_unlock_irqrestore(host->host_lock, lock_flags);
5728 return buffer;
5731 static struct scsi_host_template driver_template = {
5732 .module = THIS_MODULE,
5733 .name = "IPR",
5734 .info = ipr_ioa_info,
5735 .ioctl = ipr_ioctl,
5736 .queuecommand = ipr_queuecommand,
5737 .eh_abort_handler = ipr_eh_abort,
5738 .eh_device_reset_handler = ipr_eh_dev_reset,
5739 .eh_host_reset_handler = ipr_eh_host_reset,
5740 .slave_alloc = ipr_slave_alloc,
5741 .slave_configure = ipr_slave_configure,
5742 .slave_destroy = ipr_slave_destroy,
5743 .target_alloc = ipr_target_alloc,
5744 .target_destroy = ipr_target_destroy,
5745 .change_queue_depth = ipr_change_queue_depth,
5746 .change_queue_type = ipr_change_queue_type,
5747 .bios_param = ipr_biosparam,
5748 .can_queue = IPR_MAX_COMMANDS,
5749 .this_id = -1,
5750 .sg_tablesize = IPR_MAX_SGLIST,
5751 .max_sectors = IPR_IOA_MAX_SECTORS,
5752 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5753 .use_clustering = ENABLE_CLUSTERING,
5754 .shost_attrs = ipr_ioa_attrs,
5755 .sdev_attrs = ipr_dev_attrs,
5756 .proc_name = IPR_NAME
5760 * ipr_ata_phy_reset - libata phy_reset handler
5761 * @ap: ata port to reset
5764 static void ipr_ata_phy_reset(struct ata_port *ap)
5766 unsigned long flags;
5767 struct ipr_sata_port *sata_port = ap->private_data;
5768 struct ipr_resource_entry *res = sata_port->res;
5769 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5770 int rc;
5772 ENTER;
5773 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5774 while (ioa_cfg->in_reset_reload) {
5775 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5776 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5780 if (!ioa_cfg->allow_cmds)
5781 goto out_unlock;
5783 rc = ipr_device_reset(ioa_cfg, res);
5785 if (rc) {
5786 ap->link.device[0].class = ATA_DEV_NONE;
5787 goto out_unlock;
5790 ap->link.device[0].class = res->ata_class;
5791 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5792 ap->link.device[0].class = ATA_DEV_NONE;
5794 out_unlock:
5795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5796 LEAVE;
5800 * ipr_ata_post_internal - Cleanup after an internal command
5801 * @qc: ATA queued command
5803 * Return value:
5804 * none
5806 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5808 struct ipr_sata_port *sata_port = qc->ap->private_data;
5809 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5810 struct ipr_cmnd *ipr_cmd;
5811 unsigned long flags;
5813 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5814 while (ioa_cfg->in_reset_reload) {
5815 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5816 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5817 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5820 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5821 if (ipr_cmd->qc == qc) {
5822 ipr_device_reset(ioa_cfg, sata_port->res);
5823 break;
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5830 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5831 * @regs: destination
5832 * @tf: source ATA taskfile
5834 * Return value:
5835 * none
5837 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5838 struct ata_taskfile *tf)
5840 regs->feature = tf->feature;
5841 regs->nsect = tf->nsect;
5842 regs->lbal = tf->lbal;
5843 regs->lbam = tf->lbam;
5844 regs->lbah = tf->lbah;
5845 regs->device = tf->device;
5846 regs->command = tf->command;
5847 regs->hob_feature = tf->hob_feature;
5848 regs->hob_nsect = tf->hob_nsect;
5849 regs->hob_lbal = tf->hob_lbal;
5850 regs->hob_lbam = tf->hob_lbam;
5851 regs->hob_lbah = tf->hob_lbah;
5852 regs->ctl = tf->ctl;
5856 * ipr_sata_done - done function for SATA commands
5857 * @ipr_cmd: ipr command struct
5859 * This function is invoked by the interrupt handler for
5860 * ops generated by the SCSI mid-layer to SATA devices
5862 * Return value:
5863 * none
5865 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5868 struct ata_queued_cmd *qc = ipr_cmd->qc;
5869 struct ipr_sata_port *sata_port = qc->ap->private_data;
5870 struct ipr_resource_entry *res = sata_port->res;
5871 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5873 if (ipr_cmd->ioa_cfg->sis64)
5874 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5875 sizeof(struct ipr_ioasa_gata));
5876 else
5877 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5878 sizeof(struct ipr_ioasa_gata));
5879 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5881 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5882 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5884 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5885 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
5886 else
5887 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
5888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5889 ata_qc_complete(qc);
5893 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5894 * @ipr_cmd: ipr command struct
5895 * @qc: ATA queued command
5898 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5899 struct ata_queued_cmd *qc)
5901 u32 ioadl_flags = 0;
5902 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5903 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5904 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5905 int len = qc->nbytes;
5906 struct scatterlist *sg;
5907 unsigned int si;
5908 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5910 if (len == 0)
5911 return;
5913 if (qc->dma_dir == DMA_TO_DEVICE) {
5914 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5915 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5916 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5917 ioadl_flags = IPR_IOADL_FLAGS_READ;
5919 ioarcb->data_transfer_length = cpu_to_be32(len);
5920 ioarcb->ioadl_len =
5921 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5922 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5923 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5925 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5926 ioadl64->flags = cpu_to_be32(ioadl_flags);
5927 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5928 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5930 last_ioadl64 = ioadl64;
5931 ioadl64++;
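	/* Flag the final descriptor so the adapter knows where the list ends. */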
5934 if (likely(last_ioadl64))
5935 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5939 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5940 * @ipr_cmd: ipr command struct
5941 * @qc: ATA queued command
5944 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5945 struct ata_queued_cmd *qc)
5947 u32 ioadl_flags = 0;
5948 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5949 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5950 struct ipr_ioadl_desc *last_ioadl = NULL;
5951 int len = qc->nbytes;
5952 struct scatterlist *sg;
5953 unsigned int si;
5955 if (len == 0)
5956 return;
5958 if (qc->dma_dir == DMA_TO_DEVICE) {
5959 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5960 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5961 ioarcb->data_transfer_length = cpu_to_be32(len);
5962 ioarcb->ioadl_len =
5963 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5964 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_READ;
5966 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5967 ioarcb->read_ioadl_len =
5968 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5971 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5972 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5973 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5975 last_ioadl = ioadl;
5976 ioadl++;
5979 if (likely(last_ioadl))
5980 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5984 * ipr_qc_issue - Issue a SATA qc to a device
5985 * @qc: queued command
5987 * Return value:
5988 * 0 on success / AC_ERR_* failure code on error
5990 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5992 struct ata_port *ap = qc->ap;
5993 struct ipr_sata_port *sata_port = ap->private_data;
5994 struct ipr_resource_entry *res = sata_port->res;
5995 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5996 struct ipr_cmnd *ipr_cmd;
5997 struct ipr_ioarcb *ioarcb;
5998 struct ipr_ioarcb_ata_regs *regs;
6000 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6001 return AC_ERR_SYSTEM;
6003 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6004 ioarcb = &ipr_cmd->ioarcb;
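	/*
	 * sis64 adapters take the ATA taskfile registers in the command's
	 * ATA IOADL area; older adapters embed them in the IOARCB itself.
	 */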
6006 if (ioa_cfg->sis64) {
6007 regs = &ipr_cmd->i.ata_ioadl.regs;
6008 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6009 } else
6010 regs = &ioarcb->u.add_data.u.regs;
6012 memset(regs, 0, sizeof(*regs));
6013 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6015 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6016 ipr_cmd->qc = qc;
6017 ipr_cmd->done = ipr_sata_done;
6018 ipr_cmd->ioarcb.res_handle = res->res_handle;
6019 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6020 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6021 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6022 ipr_cmd->dma_use_sg = qc->n_elem;
6024 if (ioa_cfg->sis64)
6025 ipr_build_ata_ioadl64(ipr_cmd, qc);
6026 else
6027 ipr_build_ata_ioadl(ipr_cmd, qc);
6029 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6030 ipr_copy_sata_tf(regs, &qc->tf);
6031 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6032 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
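	/*
	 * Translate the libata protocol into IOA ATA flags: DMA
	 * transfers and ATAPI packet commands need extra flag bits.
	 */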
6034 switch (qc->tf.protocol) {
6035 case ATA_PROT_NODATA:
6036 case ATA_PROT_PIO:
6037 break;
6039 case ATA_PROT_DMA:
6040 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6041 break;
6043 case ATAPI_PROT_PIO:
6044 case ATAPI_PROT_NODATA:
6045 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6046 break;
6048 case ATAPI_PROT_DMA:
6049 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6050 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6051 break;
6053 default:
6054 WARN_ON(1);
6055 return AC_ERR_INVALID;
6058 mb();
6060 ipr_send_command(ipr_cmd);
6062 return 0;
6066 * ipr_qc_fill_rtf - Read result TF
6067 * @qc: ATA queued command
6069 * Return value:
6070 * true
6072 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6074 struct ipr_sata_port *sata_port = qc->ap->private_data;
6075 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6076 struct ata_taskfile *tf = &qc->result_tf;
6078 tf->feature = g->error;
6079 tf->nsect = g->nsect;
6080 tf->lbal = g->lbal;
6081 tf->lbam = g->lbam;
6082 tf->lbah = g->lbah;
6083 tf->device = g->device;
6084 tf->command = g->status;
6085 tf->hob_nsect = g->hob_nsect;
6086 tf->hob_lbal = g->hob_lbal;
6087 tf->hob_lbam = g->hob_lbam;
6088 tf->hob_lbah = g->hob_lbah;
6089 tf->ctl = g->alt_status;
6091 return true;
6094 static struct ata_port_operations ipr_sata_ops = {
6095 .phy_reset = ipr_ata_phy_reset,
6096 .hardreset = ipr_sata_reset,
6097 .post_internal_cmd = ipr_ata_post_internal,
6098 .qc_prep = ata_noop_qc_prep,
6099 .qc_issue = ipr_qc_issue,
6100 .qc_fill_rtf = ipr_qc_fill_rtf,
6101 .port_start = ata_sas_port_start,
6102 .port_stop = ata_sas_port_stop
6105 static struct ata_port_info sata_port_info = {
6106 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6107 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6108 .pio_mask = 0x10, /* pio4 */
6109 .mwdma_mask = 0x07,
6110 .udma_mask = 0x7f, /* udma0-6 */
6111 .port_ops = &ipr_sata_ops
6114 #ifdef CONFIG_PPC_PSERIES
6115 static const u16 ipr_blocked_processors[] = {
6116 PV_NORTHSTAR,
6117 PV_PULSAR,
6118 PV_POWER4,
6119 PV_ICESTAR,
6120 PV_SSTAR,
6121 PV_POWER4p,
6122 PV_630,
6123 PV_630p
6127 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6128 * @ioa_cfg: ioa cfg struct
6130 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6131 * certain pSeries hardware. This function determines if the given
6132 * adapter is in one of these configurations or not.
6134 * Return value:
6135 * 1 if adapter is not supported / 0 if adapter is supported
6137 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6139 int i;
6141 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6142 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6143 if (__is_processor(ipr_blocked_processors[i]))
6144 return 1;
6147 return 0;
6149 #else
6150 #define ipr_invalid_adapter(ioa_cfg) 0
6151 #endif
6154 * ipr_ioa_bringdown_done - IOA bring down completion.
6155 * @ipr_cmd: ipr command struct
6157 * This function processes the completion of an adapter bring down.
6158 * It wakes any reset sleepers.
6160 * Return value:
6161 * IPR_RC_JOB_RETURN
6163 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6165 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6167 ENTER;
6168 ioa_cfg->in_reset_reload = 0;
6169 ioa_cfg->reset_retries = 0;
6170 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6171 wake_up_all(&ioa_cfg->reset_wait_q);
6173 spin_unlock_irq(ioa_cfg->host->host_lock);
6174 scsi_unblock_requests(ioa_cfg->host);
6175 spin_lock_irq(ioa_cfg->host->host_lock);
6176 LEAVE;
6178 return IPR_RC_JOB_RETURN;
6182 * ipr_ioa_reset_done - IOA reset completion.
6183 * @ipr_cmd: ipr command struct
6185 * This function processes the completion of an adapter reset.
6186 * It schedules any necessary mid-layer add/removes and
6187 * wakes any reset sleepers.
6189 * Return value:
6190 * IPR_RC_JOB_RETURN
6192 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6195 struct ipr_resource_entry *res;
6196 struct ipr_hostrcb *hostrcb, *temp;
6197 int i = 0;
6199 ENTER;
6200 ioa_cfg->in_reset_reload = 0;
6201 ioa_cfg->allow_cmds = 1;
6202 ioa_cfg->reset_cmd = NULL;
6203 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6205 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6206 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6207 ipr_trace;
6208 break;
6211 schedule_work(&ioa_cfg->work_q);
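	/*
	 * Repost the HCAMs to the adapter: the first IPR_NUM_LOG_HCAMS
	 * as log data HCAMs, the remainder as configuration change HCAMs.
	 */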
6213 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6214 list_del(&hostrcb->queue);
6215 if (i++ < IPR_NUM_LOG_HCAMS)
6216 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6217 else
6218 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6221 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6222 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6224 ioa_cfg->reset_retries = 0;
6225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6226 wake_up_all(&ioa_cfg->reset_wait_q);
6228 spin_unlock(ioa_cfg->host->host_lock);
6229 scsi_unblock_requests(ioa_cfg->host);
6230 spin_lock(ioa_cfg->host->host_lock);
6232 if (!ioa_cfg->allow_cmds)
6233 scsi_block_requests(ioa_cfg->host);
6235 LEAVE;
6236 return IPR_RC_JOB_RETURN;
6240 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6241 * @supported_dev: supported device struct
6242 * @vpids: vendor product id struct
6244 * Return value:
6245 * none
6247 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6248 struct ipr_std_inq_vpids *vpids)
6250 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6251 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6252 supported_dev->num_records = 1;
6253 supported_dev->data_length =
6254 cpu_to_be16(sizeof(struct ipr_supported_device));
6255 supported_dev->reserved = 0;
6259 * ipr_set_supported_devs - Send Set Supported Devices for a device
6260 * @ipr_cmd: ipr command struct
6262 * This function sends a Set Supported Devices to the adapter
6264 * Return value:
6265 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6267 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6271 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6272 struct ipr_resource_entry *res = ipr_cmd->u.res;
6274 ipr_cmd->job_step = ipr_ioa_reset_done;
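	/*
	 * Resume the used resource queue walk from where the previous
	 * invocation of this job step left off (ipr_cmd->u.res).
	 */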
6276 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6277 if (!ipr_is_scsi_disk(res))
6278 continue;
6280 ipr_cmd->u.res = res;
6281 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6283 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6284 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6285 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6287 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6288 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6289 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6290 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6292 ipr_init_ioadl(ipr_cmd,
6293 ioa_cfg->vpd_cbs_dma +
6294 offsetof(struct ipr_misc_cbs, supp_dev),
6295 sizeof(struct ipr_supported_device),
6296 IPR_IOADL_FLAGS_WRITE_LAST);
6298 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6299 IPR_SET_SUP_DEVICE_TIMEOUT);
6301 if (!ioa_cfg->sis64)
6302 ipr_cmd->job_step = ipr_set_supported_devs;
6303 return IPR_RC_JOB_RETURN;
6306 return IPR_RC_JOB_CONTINUE;
6310 * ipr_get_mode_page - Locate specified mode page
6311 * @mode_pages: mode page buffer
6312 * @page_code: page code to find
6313 * @len: minimum required length for mode page
6315 * Return value:
6316 * pointer to mode page / NULL on failure
6318 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6319 u32 page_code, u32 len)
6321 struct ipr_mode_page_hdr *mode_hdr;
6322 u32 page_length;
6323 u32 length;
6325 if (!mode_pages || (mode_pages->hdr.length == 0))
6326 return NULL;
6328 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6329 mode_hdr = (struct ipr_mode_page_hdr *)
6330 (mode_pages->data + mode_pages->hdr.block_desc_len);
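	/*
	 * Walk the chained mode page headers until the requested page
	 * code is found or the buffer is exhausted.
	 */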
6332 while (length) {
6333 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6334 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6335 return mode_hdr;
6336 break;
6337 } else {
6338 page_length = (sizeof(struct ipr_mode_page_hdr) +
6339 mode_hdr->page_length);
6340 length -= page_length;
6341 mode_hdr = (struct ipr_mode_page_hdr *)
6342 ((unsigned long)mode_hdr + page_length);
6345 return NULL;
6349 * ipr_check_term_power - Check for term power errors
6350 * @ioa_cfg: ioa config struct
6351 * @mode_pages: IOAFP mode pages buffer
6353 * Check the IOAFP's mode page 28 for term power errors
6355 * Return value:
6356 * nothing
6358 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6359 struct ipr_mode_pages *mode_pages)
6361 int i;
6362 int entry_length;
6363 struct ipr_dev_bus_entry *bus;
6364 struct ipr_mode_page28 *mode_page;
6366 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6367 sizeof(struct ipr_mode_page28));
6369 /* Defensive: bail if page 28 was not found in the mode sense data */
if (!mode_page)
	return;

entry_length = mode_page->entry_length;
6371 bus = mode_page->bus;
6373 for (i = 0; i < mode_page->num_entries; i++) {
6374 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6375 dev_err(&ioa_cfg->pdev->dev,
6376 "Term power is absent on scsi bus %d\n",
6377 bus->res_addr.bus);
6380 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6385 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6386 * @ioa_cfg: ioa config struct
6388 * Looks through the config table checking for SES devices. If an
6389 * SES device is found in the SES table with a maximum SCSI bus
6390 * speed, that bus is limited to the indicated speed.
6392 * Return value:
6393 * none
6395 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6397 u32 max_xfer_rate;
6398 int i;
6400 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6401 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6402 ioa_cfg->bus_attr[i].bus_width);
6404 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6405 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6410 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6411 * @ioa_cfg: ioa config struct
6412 * @mode_pages: mode page 28 buffer
6414 * Updates mode page 28 based on driver configuration
6416 * Return value:
6417 * none
6419 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6420 struct ipr_mode_pages *mode_pages)
6422 int i, entry_length;
6423 struct ipr_dev_bus_entry *bus;
6424 struct ipr_bus_attributes *bus_attr;
6425 struct ipr_mode_page28 *mode_page;
6427 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6428 sizeof(struct ipr_mode_page28));
6430 /* Defensive: bail if page 28 was not found in the mode sense data */
if (!mode_page)
	return;

entry_length = mode_page->entry_length;
6432 /* Loop for each device bus entry */
6433 for (i = 0, bus = mode_page->bus;
6434 i < mode_page->num_entries;
6435 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6436 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6437 dev_err(&ioa_cfg->pdev->dev,
6438 "Invalid resource address reported: 0x%08X\n",
6439 IPR_GET_PHYS_LOC(bus->res_addr));
6440 continue;
6443 bus_attr = &ioa_cfg->bus_attr[i];
6444 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6445 bus->bus_width = bus_attr->bus_width;
6446 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6447 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6448 if (bus_attr->qas_enabled)
6449 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6450 else
6451 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6456 * ipr_build_mode_select - Build a mode select command
6457 * @ipr_cmd: ipr command struct
6458 * @res_handle: resource handle to send command to
6459 * @parm: Byte 1 of the Mode Select CDB (PF/SP flags)
6460 * @dma_addr: DMA buffer address
6461 * @xfer_len: data transfer length
6463 * Return value:
6464 * none
6466 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6467 __be32 res_handle, u8 parm,
6468 dma_addr_t dma_addr, u8 xfer_len)
6470 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6472 ioarcb->res_handle = res_handle;
6473 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6474 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6475 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6476 ioarcb->cmd_pkt.cdb[1] = parm;
6477 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6479 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6483 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6484 * @ipr_cmd: ipr command struct
6486 * This function sets up the SCSI bus attributes and sends
6487 * a Mode Select for Page 28 to activate them.
6489 * Return value:
6490 * IPR_RC_JOB_RETURN
6492 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6495 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6496 int length;
6498 ENTER;
6499 ipr_scsi_bus_speed_limit(ioa_cfg);
6500 ipr_check_term_power(ioa_cfg, mode_pages);
6501 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6502 length = mode_pages->hdr.length + 1;
6503 mode_pages->hdr.length = 0;
6505 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6506 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6507 length);
6509 ipr_cmd->job_step = ipr_set_supported_devs;
6510 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6511 struct ipr_resource_entry, queue);
6512 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6514 LEAVE;
6515 return IPR_RC_JOB_RETURN;
6519 * ipr_build_mode_sense - Builds a mode sense command
6520 * @ipr_cmd: ipr command struct
6521 * @res_handle: resource handle to send command to
6522 * @parm: Byte 2 of mode sense command
6523 * @dma_addr: DMA address of mode sense buffer
6524 * @xfer_len: Size of DMA buffer
6526 * Return value:
6527 * none
6529 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6530 __be32 res_handle,
6531 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6533 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6535 ioarcb->res_handle = res_handle;
6536 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6537 ioarcb->cmd_pkt.cdb[2] = parm;
6538 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6539 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6541 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6545 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6546 * @ipr_cmd: ipr command struct
6548 * This function handles the failure of an IOA bringup command.
6550 * Return value:
6551 * IPR_RC_JOB_RETURN
6553 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6556 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6558 dev_err(&ioa_cfg->pdev->dev,
6559 "0x%02X failed with IOASC: 0x%08X\n",
6560 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6562 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6563 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6564 return IPR_RC_JOB_RETURN;
6568 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6569 * @ipr_cmd: ipr command struct
6571 * This function handles the failure of a Mode Sense to the IOAFP.
6572 * Some adapters do not handle all mode pages.
6574 * Return value:
6575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6577 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6580 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6582 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6583 ipr_cmd->job_step = ipr_set_supported_devs;
6584 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6585 struct ipr_resource_entry, queue);
6586 return IPR_RC_JOB_CONTINUE;
6589 return ipr_reset_cmd_failed(ipr_cmd);
6593 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6594 * @ipr_cmd: ipr command struct
6596 * This function sends a Page 28 mode sense to the IOA to
6597 * retrieve SCSI bus attributes.
6599 * Return value:
6600 * IPR_RC_JOB_RETURN
6602 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6606 ENTER;
6607 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6608 0x28, ioa_cfg->vpd_cbs_dma +
6609 offsetof(struct ipr_misc_cbs, mode_pages),
6610 sizeof(struct ipr_mode_pages));
6612 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6613 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6615 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6617 LEAVE;
6618 return IPR_RC_JOB_RETURN;
6622 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6623 * @ipr_cmd: ipr command struct
6625 * This function enables dual IOA RAID support if possible.
6627 * Return value:
6628 * IPR_RC_JOB_RETURN
6630 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6633 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6634 struct ipr_mode_page24 *mode_page;
6635 int length;
6637 ENTER;
6638 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6639 sizeof(struct ipr_mode_page24));
6641 if (mode_page)
6642 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6644 length = mode_pages->hdr.length + 1;
6645 mode_pages->hdr.length = 0;
6647 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6648 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6649 length);
6651 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6652 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6654 LEAVE;
6655 return IPR_RC_JOB_RETURN;
6659 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6660 * @ipr_cmd: ipr command struct
6662 * This function handles the failure of a Mode Sense to the IOAFP.
6663 * Some adapters do not handle all mode pages.
6665 * Return value:
6666 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6668 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6670 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6672 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6673 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6674 return IPR_RC_JOB_CONTINUE;
6677 return ipr_reset_cmd_failed(ipr_cmd);
6681 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6682 * @ipr_cmd: ipr command struct
6684 * This function sends a mode sense to the IOA to retrieve
6685 * the IOA Advanced Function Control mode page.
6687 * Return value:
6688 * IPR_RC_JOB_RETURN
6690 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6694 ENTER;
6695 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6696 0x24, ioa_cfg->vpd_cbs_dma +
6697 offsetof(struct ipr_misc_cbs, mode_pages),
6698 sizeof(struct ipr_mode_pages));
6700 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6701 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6703 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6705 LEAVE;
6706 return IPR_RC_JOB_RETURN;
6710 * ipr_init_res_table - Initialize the resource table
6711 * @ipr_cmd: ipr command struct
6713 * This function looks through the existing resource table, comparing
6714 * it with the config table. This function will take care of old/new
6715 * devices and schedule adding/removing them from the mid-layer
6716 * as appropriate.
6718 * Return value:
6719 * IPR_RC_JOB_CONTINUE
6721 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6724 struct ipr_resource_entry *res, *temp;
6725 struct ipr_config_table_entry_wrapper cfgtew;
6726 int entries, found, flag, i;
6727 LIST_HEAD(old_res);
6729 ENTER;
6730 if (ioa_cfg->sis64)
6731 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6732 else
6733 flag = ioa_cfg->u.cfg_table->hdr.flags;
6735 if (flag & IPR_UCODE_DOWNLOAD_REQ)
6736 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6738 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6739 list_move_tail(&res->queue, &old_res);
6741 if (ioa_cfg->sis64)
6742 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6743 else
6744 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
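	/*
	 * Match each config table entry against the old resource list;
	 * unmatched entries are new devices pulled from the free queue.
	 */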
6746 for (i = 0; i < entries; i++) {
6747 if (ioa_cfg->sis64)
6748 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6749 else
6750 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6751 found = 0;
6753 list_for_each_entry_safe(res, temp, &old_res, queue) {
6754 if (ipr_is_same_device(res, &cfgtew)) {
6755 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6756 found = 1;
6757 break;
6761 if (!found) {
6762 if (list_empty(&ioa_cfg->free_res_q)) {
6763 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6764 break;
6767 found = 1;
6768 res = list_entry(ioa_cfg->free_res_q.next,
6769 struct ipr_resource_entry, queue);
6770 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6771 ipr_init_res_entry(res, &cfgtew);
6772 res->add_to_ml = 1;
6775 if (found)
6776 ipr_update_res_entry(res, &cfgtew);
6779 list_for_each_entry_safe(res, temp, &old_res, queue) {
6780 if (res->sdev) {
6781 res->del_from_ml = 1;
6782 res->res_handle = IPR_INVALID_RES_HANDLE;
6783 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6787 list_for_each_entry_safe(res, temp, &old_res, queue) {
6788 ipr_clear_res_target(res);
6789 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6792 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6793 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6794 else
6795 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6797 LEAVE;
6798 return IPR_RC_JOB_CONTINUE;
6802 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6803 * @ipr_cmd: ipr command struct
6805 * This function sends a Query IOA Configuration command
6806 * to the adapter to retrieve the IOA configuration table.
6808 * Return value:
6809 * IPR_RC_JOB_RETURN
6811 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6814 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6815 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6816 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6818 ENTER;
6819 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6820 ioa_cfg->dual_raid = 1;
6821 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6822 ucode_vpd->major_release, ucode_vpd->card_type,
6823 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6824 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6825 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6827 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6828 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6829 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6830 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6832 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6833 IPR_IOADL_FLAGS_READ_LAST);
6835 ipr_cmd->job_step = ipr_init_res_table;
6837 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6839 LEAVE;
6840 return IPR_RC_JOB_RETURN;
6844 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6845 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB byte 1 flags (e.g. EVPD)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: transfer length
6847 * This utility function sends an inquiry to the adapter.
6849 * Return value:
6850 * none
6852 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6853 dma_addr_t dma_addr, u8 xfer_len)
6855 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6857 ENTER;
6858 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6859 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6861 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6862 ioarcb->cmd_pkt.cdb[1] = flags;
6863 ioarcb->cmd_pkt.cdb[2] = page;
6864 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6866 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6868 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6869 LEAVE;
6873 * ipr_inquiry_page_supported - Is the given inquiry page supported
6874 * @page0: inquiry page 0 buffer
6875 * @page: page code.
6877 * This function determines if the specified inquiry page is supported.
6879 * Return value:
6880 * 1 if page is supported / 0 if not
6882 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6884 int i;
6886 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6887 if (page0->page[i] == page)
6888 return 1;
6890 return 0;
6894 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6895 * @ipr_cmd: ipr command struct
6897 * This function sends a Page 0xD0 inquiry to the adapter
6898 * to retrieve adapter capabilities.
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6903 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6906 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6907 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6909 ENTER;
6910 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6911 memset(cap, 0, sizeof(*cap));
6913 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6914 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6915 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6916 sizeof(struct ipr_inquiry_cap));
6917 return IPR_RC_JOB_RETURN;
6920 LEAVE;
6921 return IPR_RC_JOB_CONTINUE;
6925 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6926 * @ipr_cmd: ipr command struct
6928 * This function sends a Page 3 inquiry to the adapter
6929 * to retrieve software VPD information.
6931 * Return value:
6932 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6934 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6938 ENTER;
6940 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6942 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6943 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6944 sizeof(struct ipr_inquiry_page3));
6946 LEAVE;
6947 return IPR_RC_JOB_RETURN;
6951 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6952 * @ipr_cmd: ipr command struct
6954 * This function sends a Page 0 inquiry to the adapter
6955 * to retrieve supported inquiry pages.
6957 * Return value:
6958 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6960 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963 char type[5];
6965 ENTER;
6967 /* Grab the type out of the VPD and store it away */
6968 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6969 type[4] = '\0';
6970 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6972 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6974 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6975 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6976 sizeof(struct ipr_inquiry_page0));
6978 LEAVE;
6979 return IPR_RC_JOB_RETURN;
6983 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6984 * @ipr_cmd: ipr command struct
6986 * This function sends a standard inquiry to the adapter.
6988 * Return value:
6989 * IPR_RC_JOB_RETURN
6991 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6995 ENTER;
6996 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6998 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6999 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7000 sizeof(struct ipr_ioa_vpd));
7002 LEAVE;
7003 return IPR_RC_JOB_RETURN;
7007 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7008 * @ipr_cmd: ipr command struct
7010 * This function sends an Identify Host Request Response Queue
7011 * command to establish the HRRQ with the adapter.
7013 * Return value:
7014 * IPR_RC_JOB_RETURN
7016 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7018 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7019 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7021 ENTER;
7022 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7024 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7025 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7027 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
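	/*
	 * Pack the host RRQ DMA address into the CDB big-endian: bytes
	 * 2-5 hold the low 32 bits; on sis64 adapters bytes 10-13 below
	 * carry the upper 32 bits.
	 */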
7028 if (ioa_cfg->sis64)
7029 ioarcb->cmd_pkt.cdb[1] = 0x1;
7030 ioarcb->cmd_pkt.cdb[2] =
7031 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7032 ioarcb->cmd_pkt.cdb[3] =
7033 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7034 ioarcb->cmd_pkt.cdb[4] =
7035 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7036 ioarcb->cmd_pkt.cdb[5] =
7037 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7038 ioarcb->cmd_pkt.cdb[7] =
7039 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7040 ioarcb->cmd_pkt.cdb[8] =
7041 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7043 if (ioa_cfg->sis64) {
7044 ioarcb->cmd_pkt.cdb[10] =
7045 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7046 ioarcb->cmd_pkt.cdb[11] =
7047 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7048 ioarcb->cmd_pkt.cdb[12] =
7049 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7050 ioarcb->cmd_pkt.cdb[13] =
7051 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7054 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7056 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7058 LEAVE;
7059 return IPR_RC_JOB_RETURN;
7063 * ipr_reset_timer_done - Adapter reset timer function
7064 * @ipr_cmd: ipr command struct
7066 * Description: This function is used in adapter reset processing
7067 * for timing events. If the reset_cmd pointer in the IOA
7068 * config struct no longer points to this command, we are doing
7069 * nested resets and fail_all_ops will take care of freeing the
7070 * command block.
7072 * Return value:
7073 * none
7075 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7078 unsigned long lock_flags = 0;
7080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7082 if (ioa_cfg->reset_cmd == ipr_cmd) {
7083 list_del(&ipr_cmd->queue);
7084 ipr_cmd->done(ipr_cmd);
7087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7091 * ipr_reset_start_timer - Start a timer for adapter reset job
7092 * @ipr_cmd: ipr command struct
7093 * @timeout: timeout value
7095 * Description: This function is used in adapter reset processing
7096 * for timing events. If the reset_cmd pointer in the IOA
7097 * config struct no longer points to this command, we are doing
7098 * nested resets and fail_all_ops will take care of freeing the
7099 * command block.
7101 * Return value:
7102 * none
7104 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7105 unsigned long timeout)
7107 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7108 ipr_cmd->done = ipr_reset_ioa_job;
7110 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7111 ipr_cmd->timer.expires = jiffies + timeout;
7112 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7113 add_timer(&ipr_cmd->timer);
7117 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7118 * @ioa_cfg: ioa cfg struct
7120 * Return value:
7121 * nothing
7123 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7125 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7127 /* Initialize Host RRQ pointers */
7128 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7129 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7130 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7131 ioa_cfg->toggle_bit = 1;
7133 /* Zero out config table */
7134 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7138 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7139 * @ipr_cmd: ipr command struct
7141 * Return value:
7142 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7144 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7146 unsigned long stage, stage_time;
7147 u32 feedback;
7148 volatile u32 int_reg;
7149 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7150 u64 maskval = 0;
7152 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7153 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7154 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7156 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7158 /* sanity check the stage_time value */
7159 if (stage_time == 0)
7160 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7161 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7162 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7163 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7164 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7166 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7167 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7168 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7169 stage_time = ioa_cfg->transop_timeout;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7172 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7173 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7174 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7175 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7177 return IPR_RC_JOB_CONTINUE;
7180 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7181 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7182 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7183 ipr_cmd->done = ipr_reset_ioa_job;
7184 add_timer(&ipr_cmd->timer);
7185 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7187 return IPR_RC_JOB_RETURN;
7191 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7192 * @ipr_cmd: ipr command struct
7194 * This function reinitializes some control blocks and
7195 * enables destructive diagnostics on the adapter.
7197 * Return value:
7198 * IPR_RC_JOB_RETURN
7200 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7203 volatile u32 int_reg;
7204 volatile u64 maskval;
7206 ENTER;
7207 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7208 ipr_init_ioa_mem(ioa_cfg);
7210 ioa_cfg->allow_interrupts = 1;
7211 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7213 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7214 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7215 ioa_cfg->regs.clr_interrupt_mask_reg32);
7216 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7217 return IPR_RC_JOB_CONTINUE;
7220 /* Enable destructive diagnostics on IOA */
7221 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7223 if (ioa_cfg->sis64) {
7224 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7225 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7226 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7227 } else
7228 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7230 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7232 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7234 if (ioa_cfg->sis64) {
7235 ipr_cmd->job_step = ipr_reset_next_stage;
7236 return IPR_RC_JOB_CONTINUE;
7239 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7240 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7241 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7242 ipr_cmd->done = ipr_reset_ioa_job;
7243 add_timer(&ipr_cmd->timer);
7244 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7246 LEAVE;
7247 return IPR_RC_JOB_RETURN;
7251 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7252 * @ipr_cmd: ipr command struct
7254 * This function is invoked when an adapter dump has run out
7255 * of processing time.
7257 * Return value:
7258 * IPR_RC_JOB_CONTINUE
7260 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7264 if (ioa_cfg->sdt_state == GET_DUMP)
7265 ioa_cfg->sdt_state = ABORT_DUMP;
7267 ipr_cmd->job_step = ipr_reset_alert;
7269 return IPR_RC_JOB_CONTINUE;
7273 * ipr_unit_check_no_data - Log a unit check/no data error log
7274 * @ioa_cfg: ioa config struct
7276 * Logs an error indicating the adapter unit checked, but for some
7277 * reason, we were unable to fetch the unit check buffer.
7279 * Return value:
7280 * nothing
7282 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7284 ioa_cfg->errors_logged++;
7285 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7289 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7290 * @ioa_cfg: ioa config struct
7292 * Fetches the unit check buffer from the adapter by clocking the data
7293 * through the mailbox register.
7295 * Return value:
7296 * nothing
7298 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7300 unsigned long mailbox;
7301 struct ipr_hostrcb *hostrcb;
7302 struct ipr_uc_sdt sdt;
7303 int rc, length;
7304 u32 ioasc;
7306 mailbox = readl(ioa_cfg->ioa_mailbox);
7308 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7309 ipr_unit_check_no_data(ioa_cfg);
7310 return;
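	/*
	 * Read the SDT header through the mailbox to locate the unit
	 * check buffer's address and length.
	 */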
7313 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7314 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7315 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7317 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7318 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7319 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7320 ipr_unit_check_no_data(ioa_cfg);
7321 return;
7324 /* Find length of the first sdt entry (UC buffer) */
7325 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7326 length = be32_to_cpu(sdt.entry[0].end_token);
7327 else
7328 length = (be32_to_cpu(sdt.entry[0].end_token) -
7329 be32_to_cpu(sdt.entry[0].start_token)) &
7330 IPR_FMT2_MBX_ADDR_MASK;
7332 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7333 struct ipr_hostrcb, queue);
7334 list_del(&hostrcb->queue);
7335 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7337 rc = ipr_get_ldump_data_section(ioa_cfg,
7338 be32_to_cpu(sdt.entry[0].start_token),
7339 (__be32 *)&hostrcb->hcam,
7340 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7342 if (!rc) {
7343 ipr_handle_log_data(ioa_cfg, hostrcb);
7344 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7345 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7346 ioa_cfg->sdt_state == GET_DUMP)
7347 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7348 } else
7349 ipr_unit_check_no_data(ioa_cfg);
7351 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7355 * ipr_reset_restore_cfg_space - Restore PCI config space.
7356 * @ipr_cmd: ipr command struct
7358 * Description: This function restores the saved PCI config space of
7359 * the adapter, fails all outstanding ops back to the callers, and
7360 * fetches the dump/unit check if applicable to this reset.
7362 * Return value:
7363 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7365 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7368 int rc;
7370 ENTER;
7371 ioa_cfg->pdev->state_saved = true;
7372 rc = pci_restore_state(ioa_cfg->pdev);
7374 if (rc != PCIBIOS_SUCCESSFUL) {
7375 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7376 return IPR_RC_JOB_CONTINUE;
7379 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7380 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7381 return IPR_RC_JOB_CONTINUE;
7384 ipr_fail_all_ops(ioa_cfg);
7386 if (ioa_cfg->ioa_unit_checked) {
7387 ioa_cfg->ioa_unit_checked = 0;
7388 ipr_get_unit_check_buffer(ioa_cfg);
7389 ipr_cmd->job_step = ipr_reset_alert;
7390 ipr_reset_start_timer(ipr_cmd, 0);
7391 return IPR_RC_JOB_RETURN;
7394 if (ioa_cfg->in_ioa_bringdown) {
7395 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7396 } else {
7397 ipr_cmd->job_step = ipr_reset_enable_ioa;
7399 if (GET_DUMP == ioa_cfg->sdt_state) {
7400 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7401 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7402 schedule_work(&ioa_cfg->work_q);
7403 return IPR_RC_JOB_RETURN;
7407 LEAVE;
7408 return IPR_RC_JOB_CONTINUE;
7412 * ipr_reset_bist_done - BIST has completed on the adapter.
7413 * @ipr_cmd: ipr command struct
7415 * Description: Unblock config space and resume the reset process.
7417 * Return value:
7418 * IPR_RC_JOB_CONTINUE
7420 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7422 ENTER;
7423 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7424 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7425 LEAVE;
7426 return IPR_RC_JOB_CONTINUE;
7430 * ipr_reset_start_bist - Run BIST on the adapter.
7431 * @ipr_cmd: ipr command struct
7433 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7435 * Return value:
7436 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7438 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7440 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7441 int rc;
7443 ENTER;
7444 pci_block_user_cfg_access(ioa_cfg->pdev);
7445 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7447 if (rc != PCIBIOS_SUCCESSFUL) {
7448 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7449 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7450 rc = IPR_RC_JOB_CONTINUE;
7451 } else {
7452 ipr_cmd->job_step = ipr_reset_bist_done;
7453 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7454 rc = IPR_RC_JOB_RETURN;
7457 LEAVE;
7458 return rc;
7462 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7463 * @ipr_cmd: ipr command struct
7465 * Description: This clears PCI reset to the adapter and delays two seconds.
7467 * Return value:
7468 * IPR_RC_JOB_RETURN
7470 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7472 ENTER;
7473 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7474 ipr_cmd->job_step = ipr_reset_bist_done;
7475 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7476 LEAVE;
7477 return IPR_RC_JOB_RETURN;
7481 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7482 * @ipr_cmd: ipr command struct
7484 * Description: This asserts PCI reset to the adapter.
7486 * Return value:
7487 * IPR_RC_JOB_RETURN
7489 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492 struct pci_dev *pdev = ioa_cfg->pdev;
7494 ENTER;
7495 pci_block_user_cfg_access(pdev);
7496 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7497 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7498 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7499 LEAVE;
7500 return IPR_RC_JOB_RETURN;
7504 * ipr_reset_allowed - Query whether or not IOA can be reset
7505 * @ioa_cfg: ioa config struct
7507 * Return value:
7508 * 0 if reset not allowed / non-zero if reset is allowed
7510 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7512 volatile u32 temp_reg;
7514 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7515 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7519 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7520 * @ipr_cmd: ipr command struct
7522 * Description: This function waits for adapter permission to run BIST,
7523 * then runs BIST. If the adapter does not give permission after a
7524 * reasonable time, we will reset the adapter anyway. The impact of
7525 * resetting the adapter without warning it is the risk of losing
7526 * the persistent error log on the adapter. If the adapter is reset
7527 * while it is writing to its flash, the flash segment will have
7528 * bad ECC and be zeroed.
7530 * Return value:
7531 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7533 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7535 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536 int rc = IPR_RC_JOB_RETURN;
7538 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7539 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7540 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7541 } else {
7542 ipr_cmd->job_step = ioa_cfg->reset;
7543 rc = IPR_RC_JOB_CONTINUE;
7546 return rc;
7550 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7551 * @ipr_cmd: ipr command struct
7553 * Description: This function alerts the adapter that it will be reset.
7554 * If memory space is not currently enabled, proceed directly
7555 * to running BIST on the adapter. The timer must always be started
7556 * so we guarantee we do not run BIST from ipr_isr.
7558 * Return value:
7559 * IPR_RC_JOB_RETURN
7561 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7563 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564 u16 cmd_reg;
7565 int rc;
7567 ENTER;
7568 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
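	/*
	 * Only raise the reset alert if PCI memory space is enabled;
	 * otherwise skip straight to the adapter's reset method.
	 */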
7570 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7571 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7572 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7573 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7574 } else {
7575 ipr_cmd->job_step = ioa_cfg->reset;
7578 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7579 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7581 LEAVE;
7582 return IPR_RC_JOB_RETURN;
7586 * ipr_reset_ucode_download_done - Microcode download completion
7587 * @ipr_cmd: ipr command struct
7589 * Description: This function unmaps the microcode download buffer.
7591 * Return value:
7592 * IPR_RC_JOB_CONTINUE
7594 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7596 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7597 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7599 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7600 sglist->num_sg, DMA_TO_DEVICE);
7602 ipr_cmd->job_step = ipr_reset_alert;
7603 return IPR_RC_JOB_CONTINUE;
7607 * ipr_reset_ucode_download - Download microcode to the adapter
7608 * @ipr_cmd: ipr command struct
7610 * Description: This function checks to see if there is microcode
7611 * to download to the adapter. If there is, a download is performed.
7613 * Return value:
7614 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7616 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7618 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7619 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7621 ENTER;
7622 ipr_cmd->job_step = ipr_reset_alert;
7624 if (!sglist)
7625 return IPR_RC_JOB_CONTINUE;
7627 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7628 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7629 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7630 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
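	/*
	 * WRITE BUFFER carries the microcode image length as a 24-bit
	 * big-endian value in CDB bytes 6-8.
	 */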
7631 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7632 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7633 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7635 if (ioa_cfg->sis64)
7636 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7637 else
7638 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7639 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7641 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7642 IPR_WRITE_BUFFER_TIMEOUT);
7644 LEAVE;
7645 return IPR_RC_JOB_RETURN;
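/*
 * Illustrative sketch -- not part of the driver: per SCSI SPC, WRITE
 * BUFFER carries a 24-bit, big-endian parameter list length in CDB
 * bytes 6..8, which is what the shift-and-mask assignments above
 * implement. Equivalently, with a hypothetical helper:
 *
 *	static void put_be24(u8 *cdb, u32 len)
 *	{
 *		cdb[6] = (len >> 16) & 0xff;	// most significant byte
 *		cdb[7] = (len >> 8) & 0xff;
 *		cdb[8] = len & 0xff;		// least significant byte
 *	}
 */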
7649 * ipr_reset_shutdown_ioa - Shutdown the adapter
7650 * @ipr_cmd: ipr command struct
7652 * Description: This function issues an adapter shutdown of the
7653 * specified type to the specified adapter as part of the
7654 * adapter reset job.
7656 * Return value:
7657 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7659 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7663 unsigned long timeout;
7664 int rc = IPR_RC_JOB_CONTINUE;
7666 ENTER;
7667 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7668 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7669 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7670 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7671 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7673 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7674 timeout = IPR_SHUTDOWN_TIMEOUT;
7675 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7676 timeout = IPR_INTERNAL_TIMEOUT;
7677 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7678 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7679 else
7680 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7682 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7684 rc = IPR_RC_JOB_RETURN;
7685 ipr_cmd->job_step = ipr_reset_ucode_download;
7686 } else
7687 ipr_cmd->job_step = ipr_reset_alert;
7689 LEAVE;
7690 return rc;
7694 * ipr_reset_ioa_job - Adapter reset job
7695 * @ipr_cmd: ipr command struct
7697 * Description: This function is the job router for the adapter reset job.
7699 * Return value:
7700 * none
7702 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7704 u32 rc, ioasc;
7705 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7707 do {
7708 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7710 if (ioa_cfg->reset_cmd != ipr_cmd) {
7711 /*
7712 * We are doing nested adapter resets and this is
7713 * not the current reset job.
7714 */
7715 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7716 return;
7719 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7720 rc = ipr_cmd->job_step_failed(ipr_cmd);
7721 if (rc == IPR_RC_JOB_RETURN)
7722 return;
7725 ipr_reinit_ipr_cmnd(ipr_cmd);
7726 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7727 rc = ipr_cmd->job_step(ipr_cmd);
7728 } while (rc == IPR_RC_JOB_CONTINUE);
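/*
 * Illustrative sketch -- not part of the driver: ipr_reset_ioa_job() is
 * a small state machine driver. Each job step either completes
 * synchronously and returns IPR_RC_JOB_CONTINUE (after pointing
 * job_step at the next state), or kicks off asynchronous work and
 * returns IPR_RC_JOB_RETURN, in which case the completion path calls
 * back into this router. A minimal chain, with hypothetical step names:
 *
 *	static int step_b(struct ipr_cmnd *cmd)
 *	{
 *		return IPR_RC_JOB_RETURN;	// async work queued elsewhere
 *	}
 *
 *	static int step_a(struct ipr_cmnd *cmd)
 *	{
 *		cmd->job_step = step_b;		// advance the state machine
 *		return IPR_RC_JOB_CONTINUE;	// router invokes step_b now
 *	}
 */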
7732 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7733 * @ioa_cfg: ioa config struct
7734 * @job_step: first job step of reset job
7735 * @shutdown_type: shutdown type
7737 * Description: This function will initiate the reset of the given adapter
7738 * starting at the selected job step.
7739 * If the caller needs to wait on the completion of the reset,
7740 * the caller must sleep on the reset_wait_q.
7742 * Return value:
7743 * none
7745 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7746 int (*job_step) (struct ipr_cmnd *),
7747 enum ipr_shutdown_type shutdown_type)
7749 struct ipr_cmnd *ipr_cmd;
7751 ioa_cfg->in_reset_reload = 1;
7752 ioa_cfg->allow_cmds = 0;
7753 scsi_block_requests(ioa_cfg->host);
7755 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7756 ioa_cfg->reset_cmd = ipr_cmd;
7757 ipr_cmd->job_step = job_step;
7758 ipr_cmd->u.shutdown_type = shutdown_type;
7760 ipr_reset_ioa_job(ipr_cmd);
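/*
 * Illustrative sketch -- not part of the driver: as the comment above
 * says, a caller that must wait for the reset takes the host lock,
 * starts the reset, drops the lock, and sleeps on reset_wait_q (flags
 * is assumed to be declared by the caller):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	_ipr_initiate_ioa_reset(ioa_cfg, job_step, shutdown_type);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */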
7764 * ipr_initiate_ioa_reset - Initiate an adapter reset
7765 * @ioa_cfg: ioa config struct
7766 * @shutdown_type: shutdown type
7768 * Description: This function will initiate the reset of the given adapter.
7769 * If the caller needs to wait on the completion of the reset,
7770 * the caller must sleep on the reset_wait_q.
7772 * Return value:
7773 * none
7775 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7776 enum ipr_shutdown_type shutdown_type)
7778 if (ioa_cfg->ioa_is_dead)
7779 return;
7781 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7782 ioa_cfg->sdt_state = ABORT_DUMP;
7784 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7785 dev_err(&ioa_cfg->pdev->dev,
7786 "IOA taken offline - error recovery failed\n");
7788 ioa_cfg->reset_retries = 0;
7789 ioa_cfg->ioa_is_dead = 1;
7791 if (ioa_cfg->in_ioa_bringdown) {
7792 ioa_cfg->reset_cmd = NULL;
7793 ioa_cfg->in_reset_reload = 0;
7794 ipr_fail_all_ops(ioa_cfg);
7795 wake_up_all(&ioa_cfg->reset_wait_q);
7797 spin_unlock_irq(ioa_cfg->host->host_lock);
7798 scsi_unblock_requests(ioa_cfg->host);
7799 spin_lock_irq(ioa_cfg->host->host_lock);
7800 return;
7801 } else {
7802 ioa_cfg->in_ioa_bringdown = 1;
7803 shutdown_type = IPR_SHUTDOWN_NONE;
7807 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7808 shutdown_type);
7812 * ipr_reset_freeze - Hold off all I/O activity
7813 * @ipr_cmd: ipr command struct
7815 * Description: If the PCI slot is frozen, hold off all I/O
7816 * activity; then, as soon as the slot is available again,
7817 * initiate an adapter reset.
7819 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7821 /* Disallow new interrupts, avoid loop */
7822 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7823 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7824 ipr_cmd->done = ipr_reset_ioa_job;
7825 return IPR_RC_JOB_RETURN;
7829 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7830 * @pdev: PCI device struct
7832 * Description: This routine is called to tell us that the PCI bus
7833 * is down. Can't do anything here, except put the device driver
7834 * into a holding pattern, waiting for the PCI bus to come back.
7836 static void ipr_pci_frozen(struct pci_dev *pdev)
7838 unsigned long flags = 0;
7839 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7841 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7842 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7847 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7848 * @pdev: PCI device struct
7850 * Description: This routine is called by the pci error recovery
7851 * code after the PCI slot has been reset, just before we
7852 * should resume normal operations.
7854 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7856 unsigned long flags = 0;
7857 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7859 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7860 if (ioa_cfg->needs_warm_reset)
7861 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7862 else
7863 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7864 IPR_SHUTDOWN_NONE);
7865 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7866 return PCI_ERS_RESULT_RECOVERED;
7870 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7871 * @pdev: PCI device struct
7873 * Description: This routine is called when the PCI bus has
7874 * permanently failed.
7876 static void ipr_pci_perm_failure(struct pci_dev *pdev)
7878 unsigned long flags = 0;
7879 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7881 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7882 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7883 ioa_cfg->sdt_state = ABORT_DUMP;
7884 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7885 ioa_cfg->in_ioa_bringdown = 1;
7886 ioa_cfg->allow_cmds = 0;
7887 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7892 * ipr_pci_error_detected - Called when a PCI error is detected.
7893 * @pdev: PCI device struct
7894 * @state: PCI channel state
7896 * Description: Called when a PCI error is detected.
7898 * Return value:
7899 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7901 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7902 pci_channel_state_t state)
7904 switch (state) {
7905 case pci_channel_io_frozen:
7906 ipr_pci_frozen(pdev);
7907 return PCI_ERS_RESULT_NEED_RESET;
7908 case pci_channel_io_perm_failure:
7909 ipr_pci_perm_failure(pdev);
7910 return PCI_ERS_RESULT_DISCONNECT;
7912 default:
7913 break;
7915 return PCI_ERS_RESULT_NEED_RESET;
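/*
 * Illustrative sketch -- not part of the driver: the PCI error recovery
 * core drives the callbacks above roughly as follows. error_detected()
 * runs first; if it returns PCI_ERS_RESULT_NEED_RESET, the core resets
 * the slot and then calls slot_reset():
 *
 *	rc = err_handler->error_detected(pdev, state);
 *	if (rc == PCI_ERS_RESULT_NEED_RESET)
 *		rc = err_handler->slot_reset(pdev);	// resume if RECOVERED
 */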
7919 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7920 * @ioa_cfg: ioa cfg struct
7922 * Description: This is the second phase of adapter initialization.
7923 * This function takes care of initializing the adapter to the point
7924 * where it can accept new commands.
7926 * Return value:
7927 * 0 on success / -EIO on failure
7929 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7931 int rc = 0;
7932 unsigned long host_lock_flags = 0;
7934 ENTER;
7935 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7936 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
7937 if (ioa_cfg->needs_hard_reset) {
7938 ioa_cfg->needs_hard_reset = 0;
7939 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7940 } else
7941 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7942 IPR_SHUTDOWN_NONE);
7944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7945 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7946 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7948 if (ioa_cfg->ioa_is_dead) {
7949 rc = -EIO;
7950 } else if (ipr_invalid_adapter(ioa_cfg)) {
7951 if (!ipr_testmode)
7952 rc = -EIO;
7954 dev_err(&ioa_cfg->pdev->dev,
7955 "Adapter not supported in this hardware configuration.\n");
7958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7960 LEAVE;
7961 return rc;
7965 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7966 * @ioa_cfg: ioa config struct
7968 * Return value:
7969 * none
7971 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7973 int i;
7975 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7976 if (ioa_cfg->ipr_cmnd_list[i])
7977 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7978 ioa_cfg->ipr_cmnd_list[i],
7979 ioa_cfg->ipr_cmnd_list_dma[i]);
7981 ioa_cfg->ipr_cmnd_list[i] = NULL;
7984 if (ioa_cfg->ipr_cmd_pool)
7985 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
7987 ioa_cfg->ipr_cmd_pool = NULL;
7991 * ipr_free_mem - Frees memory allocated for an adapter
7992 * @ioa_cfg: ioa cfg struct
7994 * Return value:
7995 * nothing
7997 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7999 int i;
8001 kfree(ioa_cfg->res_entries);
8002 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8003 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8004 ipr_free_cmd_blks(ioa_cfg);
8005 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8006 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8007 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8008 ioa_cfg->u.cfg_table,
8009 ioa_cfg->cfg_table_dma);
8011 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8012 pci_free_consistent(ioa_cfg->pdev,
8013 sizeof(struct ipr_hostrcb),
8014 ioa_cfg->hostrcb[i],
8015 ioa_cfg->hostrcb_dma[i]);
8018 ipr_free_dump(ioa_cfg);
8019 kfree(ioa_cfg->trace);
8023 * ipr_free_all_resources - Free all allocated resources for an adapter.
8024 * @ipr_cmd: ipr command struct
8026 * This function frees all allocated resources for the
8027 * specified adapter.
8029 * Return value:
8030 * none
8032 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8034 struct pci_dev *pdev = ioa_cfg->pdev;
8036 ENTER;
8037 free_irq(pdev->irq, ioa_cfg);
8038 pci_disable_msi(pdev);
8039 iounmap(ioa_cfg->hdw_dma_regs);
8040 pci_release_regions(pdev);
8041 ipr_free_mem(ioa_cfg);
8042 scsi_host_put(ioa_cfg->host);
8043 pci_disable_device(pdev);
8044 LEAVE;
8048 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8049 * @ioa_cfg: ioa config struct
8051 * Return value:
8052 * 0 on success / -ENOMEM on allocation failure
8054 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8056 struct ipr_cmnd *ipr_cmd;
8057 struct ipr_ioarcb *ioarcb;
8058 dma_addr_t dma_addr;
8059 int i;
8061 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8062 sizeof(struct ipr_cmnd), 16, 0);
8064 if (!ioa_cfg->ipr_cmd_pool)
8065 return -ENOMEM;
8067 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8068 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8070 if (!ipr_cmd) {
8071 ipr_free_cmd_blks(ioa_cfg);
8072 return -ENOMEM;
8075 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8076 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8077 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8079 ioarcb = &ipr_cmd->ioarcb;
8080 ipr_cmd->dma_addr = dma_addr;
8081 if (ioa_cfg->sis64)
8082 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8083 else
8084 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8086 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8087 if (ioa_cfg->sis64) {
8088 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8089 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8090 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8091 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8092 } else {
8093 ioarcb->write_ioadl_addr =
8094 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8095 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8096 ioarcb->ioasa_host_pci_addr =
8097 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8099 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8100 ipr_cmd->cmd_index = i;
8101 ipr_cmd->ioa_cfg = ioa_cfg;
8102 ipr_cmd->sense_buffer_dma = dma_addr +
8103 offsetof(struct ipr_cmnd, sense_buffer);
8105 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8108 return 0;
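/*
 * Illustrative sketch -- not part of the driver: each command block is
 * one contiguous DMA allocation, so the bus addresses of the embedded
 * IOADL, IOASA, and sense buffer are derived from the block's base
 * address with offsetof(), exactly as the assignments above do:
 *
 *	dma_addr_t base  = dma_addr;	// start of this struct ipr_cmnd
 *	dma_addr_t ioasa = base + offsetof(struct ipr_cmnd, s.ioasa);
 *	dma_addr_t sense = base + offsetof(struct ipr_cmnd, sense_buffer);
 */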
8112 * ipr_alloc_mem - Allocate memory for an adapter
8113 * @ioa_cfg: ioa config struct
8115 * Return value:
8116 * 0 on success / non-zero for error
8118 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8120 struct pci_dev *pdev = ioa_cfg->pdev;
8121 int i, rc = -ENOMEM;
8123 ENTER;
8124 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8125 ioa_cfg->max_devs_supported, GFP_KERNEL);
8127 if (!ioa_cfg->res_entries)
8128 goto out;
8130 if (ioa_cfg->sis64) {
8131 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8132 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8133 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8134 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8135 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8136 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8139 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8140 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8141 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8144 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8145 sizeof(struct ipr_misc_cbs),
8146 &ioa_cfg->vpd_cbs_dma);
8148 if (!ioa_cfg->vpd_cbs)
8149 goto out_free_res_entries;
8151 if (ipr_alloc_cmd_blks(ioa_cfg))
8152 goto out_free_vpd_cbs;
8154 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8155 sizeof(u32) * IPR_NUM_CMD_BLKS,
8156 &ioa_cfg->host_rrq_dma);
8158 if (!ioa_cfg->host_rrq)
8159 goto out_ipr_free_cmd_blocks;
8161 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8162 ioa_cfg->cfg_table_size,
8163 &ioa_cfg->cfg_table_dma);
8165 if (!ioa_cfg->u.cfg_table)
8166 goto out_free_host_rrq;
8168 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8169 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8170 sizeof(struct ipr_hostrcb),
8171 &ioa_cfg->hostrcb_dma[i]);
8173 if (!ioa_cfg->hostrcb[i])
8174 goto out_free_hostrcb_dma;
8176 ioa_cfg->hostrcb[i]->hostrcb_dma =
8177 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8178 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8179 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8182 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8183 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8185 if (!ioa_cfg->trace)
8186 goto out_free_hostrcb_dma;
8188 rc = 0;
8189 out:
8190 LEAVE;
8191 return rc;
8193 out_free_hostrcb_dma:
8194 while (i-- > 0) {
8195 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8196 ioa_cfg->hostrcb[i],
8197 ioa_cfg->hostrcb_dma[i]);
8199 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8200 ioa_cfg->u.cfg_table,
8201 ioa_cfg->cfg_table_dma);
8202 out_free_host_rrq:
8203 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8204 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8205 out_ipr_free_cmd_blocks:
8206 ipr_free_cmd_blks(ioa_cfg);
8207 out_free_vpd_cbs:
8208 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8209 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8210 out_free_res_entries:
8211 kfree(ioa_cfg->res_entries);
8212 goto out;
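/*
 * Illustrative sketch -- not part of the driver: ipr_alloc_mem() uses
 * the standard kernel goto-unwind idiom. Allocate in order; on failure,
 * jump to a label that frees everything allocated so far, in reverse:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;		// nothing to undo yet
 *	b = alloc_b();
 *	if (!b)
 *		goto free_a;		// only a exists at this point
 *	return 0;
 * free_a:
 *	free_a(a);
 * out:
 *	return -ENOMEM;
 */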
8216 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8217 * @ioa_cfg: ioa config struct
8219 * Return value:
8220 * none
8222 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8224 int i;
8226 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8227 ioa_cfg->bus_attr[i].bus = i;
8228 ioa_cfg->bus_attr[i].qas_enabled = 0;
8229 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8230 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8231 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8232 else
8233 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8238 * ipr_init_ioa_cfg - Initialize IOA config struct
8239 * @ioa_cfg: ioa config struct
8240 * @host: scsi host struct
8241 * @pdev: PCI dev struct
8243 * Return value:
8244 * none
8246 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8247 struct Scsi_Host *host, struct pci_dev *pdev)
8249 const struct ipr_interrupt_offsets *p;
8250 struct ipr_interrupts *t;
8251 void __iomem *base;
8253 ioa_cfg->host = host;
8254 ioa_cfg->pdev = pdev;
8255 ioa_cfg->log_level = ipr_log_level;
8256 ioa_cfg->doorbell = IPR_DOORBELL;
8257 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8258 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8259 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8260 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8261 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8262 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8263 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8264 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8266 INIT_LIST_HEAD(&ioa_cfg->free_q);
8267 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8268 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8269 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8270 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8271 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8272 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8273 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8274 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8275 ioa_cfg->sdt_state = INACTIVE;
8277 ipr_initialize_bus_attr(ioa_cfg);
8278 ioa_cfg->max_devs_supported = ipr_max_devs;
8280 if (ioa_cfg->sis64) {
8281 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8282 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8283 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8284 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8285 } else {
8286 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8287 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8288 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8289 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8291 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8292 host->unique_id = host->host_no;
8293 host->max_cmd_len = IPR_MAX_CDB_LEN;
8294 pci_set_drvdata(pdev, ioa_cfg);
8296 p = &ioa_cfg->chip_cfg->regs;
8297 t = &ioa_cfg->regs;
8298 base = ioa_cfg->hdw_dma_regs;
8300 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8301 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8302 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8303 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8304 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8305 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8306 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8307 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8308 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8309 t->ioarrin_reg = base + p->ioarrin_reg;
8310 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8311 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8312 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8313 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8314 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8315 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8317 if (ioa_cfg->sis64) {
8318 t->init_feedback_reg = base + p->init_feedback_reg;
8319 t->dump_addr_reg = base + p->dump_addr_reg;
8320 t->dump_data_reg = base + p->dump_data_reg;
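/*
 * Illustrative sketch -- not part of the driver: the register setup
 * above turns the per-chip table of byte offsets into ready-to-use
 * __iomem pointers by adding each offset to the mapped BAR base, so the
 * fast paths can issue readl()/writel() with no per-access arithmetic:
 *
 *	void __iomem *reg = base + p->ioarrin_reg;	// offset -> pointer
 *	writel(value, reg);
 */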
8325 * ipr_get_chip_info - Find adapter chip information
8326 * @dev_id: PCI device id struct
8328 * Return value:
8329 * ptr to chip information on success / NULL on failure
8331 static const struct ipr_chip_t * __devinit
8332 ipr_get_chip_info(const struct pci_device_id *dev_id)
8334 int i;
8336 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8337 if (ipr_chip[i].vendor == dev_id->vendor &&
8338 ipr_chip[i].device == dev_id->device)
8339 return &ipr_chip[i];
8340 return NULL;
8344 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8345 * @pdev: PCI device struct
8347 * Description: Simply sets the msi_received flag to 1, indicating that
8348 * Message Signaled Interrupts are supported.
8350 * Return value:
8351 * IRQ_HANDLED
8353 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8356 unsigned long lock_flags = 0;
8357 irqreturn_t rc = IRQ_HANDLED;
8359 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8361 ioa_cfg->msi_received = 1;
8362 wake_up(&ioa_cfg->msi_wait_q);
8364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8365 return rc;
8369 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8370 * @pdev: PCI device struct
8372 * Description: The return value from pci_enable_msi() cannot always be
8373 * trusted. This routine sets up and initiates a test interrupt to determine
8374 * if the interrupt is received via the ipr_test_intr() service routine.
8375 * If the test fails, the driver will fall back to LSI.
8377 * Return value:
8378 * 0 on success / non-zero on failure
8380 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8381 struct pci_dev *pdev)
8383 int rc;
8384 volatile u32 int_reg;
8385 unsigned long lock_flags = 0;
8387 ENTER;
8389 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8390 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8391 ioa_cfg->msi_received = 0;
8392 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8393 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8394 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8397 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8398 if (rc) {
8399 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
8400 return rc;
8401 } else if (ipr_debug)
8402 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8404 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8405 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8406 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8407 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8410 if (!ioa_cfg->msi_received) {
8411 /* MSI test failed */
8412 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8413 rc = -EOPNOTSUPP;
8414 } else if (ipr_debug)
8415 dev_info(&pdev->dev, "MSI test succeeded.\n");
8417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8419 free_irq(pdev->irq, ioa_cfg);
8421 LEAVE;
8423 return rc;
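/*
 * Illustrative sketch -- not part of the driver: the MSI test above
 * follows a generic "fire a test interrupt and wait" pattern. Install a
 * throwaway handler, trigger an interrupt the hardware is known to
 * raise, and give it a bounded time to arrive. Names are hypothetical:
 *
 *	received = 0;
 *	request_irq(irq, test_handler, 0, name, dev);
 *	trigger_test_interrupt(dev);			// hypothetical helper
 *	wait_event_timeout(waitq, received, HZ);	// at most one second
 *	free_irq(irq, dev);
 *	return received ? 0 : -EOPNOTSUPP;		// fall back if unseen
 */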
8427 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8428 * @pdev: PCI device struct
8429 * @dev_id: PCI device id struct
8431 * Return value:
8432 * 0 on success / non-zero on failure
8434 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8435 const struct pci_device_id *dev_id)
8437 struct ipr_ioa_cfg *ioa_cfg;
8438 struct Scsi_Host *host;
8439 unsigned long ipr_regs_pci;
8440 void __iomem *ipr_regs;
8441 int rc = PCIBIOS_SUCCESSFUL;
8442 volatile u32 mask, uproc, interrupts;
8444 ENTER;
8446 if ((rc = pci_enable_device(pdev))) {
8447 dev_err(&pdev->dev, "Cannot enable adapter\n");
8448 goto out;
8451 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8453 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8455 if (!host) {
8456 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8457 rc = -ENOMEM;
8458 goto out_disable;
8461 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8462 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8463 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8464 sata_port_info.flags, &ipr_sata_ops);
8466 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8468 if (!ioa_cfg->ipr_chip) {
8469 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8470 dev_id->vendor, dev_id->device);
8471 goto out_scsi_host_put;
8474 /* set SIS 32 or SIS 64 */
8475 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8476 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8478 if (ipr_transop_timeout)
8479 ioa_cfg->transop_timeout = ipr_transop_timeout;
8480 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8481 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8482 else
8483 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8485 ioa_cfg->revid = pdev->revision;
8487 ipr_regs_pci = pci_resource_start(pdev, 0);
8489 rc = pci_request_regions(pdev, IPR_NAME);
8490 if (rc < 0) {
8491 dev_err(&pdev->dev,
8492 "Couldn't register memory range of registers\n");
8493 goto out_scsi_host_put;
8496 ipr_regs = pci_ioremap_bar(pdev, 0);
8498 if (!ipr_regs) {
8499 dev_err(&pdev->dev,
8500 "Couldn't map memory range of registers\n");
8501 rc = -ENOMEM;
8502 goto out_release_regions;
8505 ioa_cfg->hdw_dma_regs = ipr_regs;
8506 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8507 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8509 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8511 pci_set_master(pdev);
8513 if (ioa_cfg->sis64) {
8514 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8515 if (rc < 0) {
8516 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8520 } else
8521 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8523 if (rc < 0) {
8524 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8525 goto cleanup_nomem;
8528 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8529 ioa_cfg->chip_cfg->cache_line_size);
8531 if (rc != PCIBIOS_SUCCESSFUL) {
8532 dev_err(&pdev->dev, "Write of cache line size failed\n");
8533 rc = -EIO;
8534 goto cleanup_nomem;
8537 /* Enable MSI style interrupts if they are supported. */
8538 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8539 rc = ipr_test_msi(ioa_cfg, pdev);
8540 if (rc == -EOPNOTSUPP)
8541 pci_disable_msi(pdev);
8542 else if (rc)
8543 goto out_msi_disable;
8544 else
8545 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8546 } else if (ipr_debug)
8547 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8549 /* Save away PCI config space for use following IOA reset */
8550 rc = pci_save_state(pdev);
8552 if (rc != PCIBIOS_SUCCESSFUL) {
8553 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8554 rc = -EIO;
8555 goto cleanup_nomem;
8558 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8559 goto cleanup_nomem;
8561 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8562 goto cleanup_nomem;
8564 if (ioa_cfg->sis64)
8565 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8566 + ((sizeof(struct ipr_config_table_entry64)
8567 * ioa_cfg->max_devs_supported)));
8568 else
8569 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8570 + ((sizeof(struct ipr_config_table_entry)
8571 * ioa_cfg->max_devs_supported)));
8573 rc = ipr_alloc_mem(ioa_cfg);
8574 if (rc < 0) {
8575 dev_err(&pdev->dev,
8576 "Couldn't allocate enough memory for device driver!\n");
8577 goto cleanup_nomem;
8581 * If HRRQ updated interrupt is not masked, or reset alert is set,
8582 * the card is in an unknown state and needs a hard reset
8584 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8585 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8586 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8587 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8588 ioa_cfg->needs_hard_reset = 1;
8589 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8590 ioa_cfg->needs_hard_reset = 1;
8591 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8592 ioa_cfg->ioa_unit_checked = 1;
8594 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8595 rc = request_irq(pdev->irq, ipr_isr,
8596 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8597 IPR_NAME, ioa_cfg);
8599 if (rc) {
8600 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8601 pdev->irq, rc);
8602 goto cleanup_nolog;
8605 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8606 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8607 ioa_cfg->needs_warm_reset = 1;
8608 ioa_cfg->reset = ipr_reset_slot_reset;
8609 } else
8610 ioa_cfg->reset = ipr_reset_start_bist;
8612 spin_lock(&ipr_driver_lock);
8613 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8614 spin_unlock(&ipr_driver_lock);
8616 LEAVE;
8617 out:
8618 return rc;
8620 cleanup_nolog:
8621 ipr_free_mem(ioa_cfg);
8622 cleanup_nomem:
8623 iounmap(ipr_regs);
8624 out_msi_disable:
8625 pci_disable_msi(pdev);
8626 out_release_regions:
8627 pci_release_regions(pdev);
8628 out_scsi_host_put:
8629 scsi_host_put(host);
8630 out_disable:
8631 pci_disable_device(pdev);
8632 goto out;
8636 * ipr_scan_vsets - Scans for VSET devices
8637 * @ioa_cfg: ioa config struct
8639 * Description: Since the VSET resources do not follow SAM in that we can have
8640 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8642 * Return value:
8643 * none
8645 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8647 int target, lun;
8649 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8650 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
8651 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8655 * ipr_initiate_ioa_bringdown - Bring down an adapter
8656 * @ioa_cfg: ioa config struct
8657 * @shutdown_type: shutdown type
8659 * Description: This function will initiate bringing down the adapter.
8660 * This consists of issuing an IOA shutdown to the adapter
8661 * to flush the cache, and running BIST.
8662 * If the caller needs to wait on the completion of the reset,
8663 * the caller must sleep on the reset_wait_q.
8665 * Return value:
8666 * none
8668 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8669 enum ipr_shutdown_type shutdown_type)
8671 ENTER;
8672 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8673 ioa_cfg->sdt_state = ABORT_DUMP;
8674 ioa_cfg->reset_retries = 0;
8675 ioa_cfg->in_ioa_bringdown = 1;
8676 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8677 LEAVE;
8681 * __ipr_remove - Remove a single adapter
8682 * @pdev: pci device struct
8684 * Adapter hot plug remove entry point.
8686 * Return value:
8687 * none
8689 static void __ipr_remove(struct pci_dev *pdev)
8691 unsigned long host_lock_flags = 0;
8692 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8693 ENTER;
8695 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8696 while (ioa_cfg->in_reset_reload) {
8697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8698 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8699 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8702 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8704 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8705 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8706 flush_scheduled_work();
8707 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8709 spin_lock(&ipr_driver_lock);
8710 list_del(&ioa_cfg->queue);
8711 spin_unlock(&ipr_driver_lock);
8713 if (ioa_cfg->sdt_state == ABORT_DUMP)
8714 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8717 ipr_free_all_resources(ioa_cfg);
8719 LEAVE;
8723 * ipr_remove - IOA hot plug remove entry point
8724 * @pdev: pci device struct
8726 * Adapter hot plug remove entry point.
8728 * Return value:
8729 * none
8731 static void __devexit ipr_remove(struct pci_dev *pdev)
8733 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8735 ENTER;
8737 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8738 &ipr_trace_attr);
8739 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8740 &ipr_dump_attr);
8741 scsi_remove_host(ioa_cfg->host);
8743 __ipr_remove(pdev);
8745 LEAVE;
8749 * ipr_probe - Adapter hot plug add entry point
8751 * Return value:
8752 * 0 on success / non-zero on failure
8754 static int __devinit ipr_probe(struct pci_dev *pdev,
8755 const struct pci_device_id *dev_id)
8757 struct ipr_ioa_cfg *ioa_cfg;
8758 int rc;
8760 rc = ipr_probe_ioa(pdev, dev_id);
8762 if (rc)
8763 return rc;
8765 ioa_cfg = pci_get_drvdata(pdev);
8766 rc = ipr_probe_ioa_part2(ioa_cfg);
8768 if (rc) {
8769 __ipr_remove(pdev);
8770 return rc;
8773 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8775 if (rc) {
8776 __ipr_remove(pdev);
8777 return rc;
8780 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8781 &ipr_trace_attr);
8783 if (rc) {
8784 scsi_remove_host(ioa_cfg->host);
8785 __ipr_remove(pdev);
8786 return rc;
8789 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8790 &ipr_dump_attr);
8792 if (rc) {
8793 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8794 &ipr_trace_attr);
8795 scsi_remove_host(ioa_cfg->host);
8796 __ipr_remove(pdev);
8797 return rc;
8800 scsi_scan_host(ioa_cfg->host);
8801 ipr_scan_vsets(ioa_cfg);
8802 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8803 ioa_cfg->allow_ml_add_del = 1;
8804 ioa_cfg->host->max_channel = IPR_VSET_BUS;
8805 schedule_work(&ioa_cfg->work_q);
8806 return 0;
8810 * ipr_shutdown - Shutdown handler.
8811 * @pdev: pci device struct
8813 * This function is invoked upon system shutdown/reboot. It issues
8814 * a shutdown to the adapter to flush the write cache.
8816 * Return value:
8817 * none
8819 static void ipr_shutdown(struct pci_dev *pdev)
8821 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8822 unsigned long lock_flags = 0;
8824 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8825 while (ioa_cfg->in_reset_reload) {
8826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8827 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8828 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8831 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8833 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
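/*
 * Illustrative sketch -- not part of the driver: the while loop above
 * is the usual way to wait out an in-progress reset when the flag is
 * protected by a spinlock. wait_event() may sleep, so the lock must be
 * dropped around it and the condition rechecked once it is retaken:
 *
 *	while (flag_is_set) {		// checked under the lock
 *		spin_unlock_irqrestore(lock, flags);
 *		wait_event(waitq, !flag_is_set);
 *		spin_lock_irqsave(lock, flags);
 *	}
 */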
8836 static struct pci_device_id ipr_pci_table[] __devinitdata = {
8837 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8838 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8839 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8840 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8841 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8843 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8844 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8845 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8846 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8847 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8848 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8849 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8850 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8851 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8852 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8853 IPR_USE_LONG_TRANSOP_TIMEOUT },
8854 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8855 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8856 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8857 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8858 IPR_USE_LONG_TRANSOP_TIMEOUT },
8859 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8860 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8861 IPR_USE_LONG_TRANSOP_TIMEOUT },
8862 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8863 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8864 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8865 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8866 IPR_USE_LONG_TRANSOP_TIMEOUT},
8867 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8868 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8869 IPR_USE_LONG_TRANSOP_TIMEOUT },
8870 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8871 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8872 IPR_USE_LONG_TRANSOP_TIMEOUT },
8873 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8874 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8875 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8876 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8877 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8878 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8879 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8880 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8881 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8882 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8883 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8884 IPR_USE_LONG_TRANSOP_TIMEOUT },
8885 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8886 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8887 IPR_USE_LONG_TRANSOP_TIMEOUT },
8888 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8889 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8890 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8891 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8892 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8893 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8894 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8895 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8896 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8897 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8898 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8899 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8900 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8901 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8902 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8903 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
8906 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8908 static struct pci_error_handlers ipr_err_handler = {
8909 .error_detected = ipr_pci_error_detected,
8910 .slot_reset = ipr_pci_slot_reset,
8913 static struct pci_driver ipr_driver = {
8914 .name = IPR_NAME,
8915 .id_table = ipr_pci_table,
8916 .probe = ipr_probe,
8917 .remove = __devexit_p(ipr_remove),
8918 .shutdown = ipr_shutdown,
8919 .err_handler = &ipr_err_handler,
8923 * ipr_halt_done - Shutdown prepare completion
8925 * Return value:
8926 * none
8928 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8930 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8932 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8936 * ipr_halt - Issue shutdown prepare to all adapters
8938 * Return value:
8939 * NOTIFY_OK on success / NOTIFY_DONE on failure
8941 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8943 struct ipr_cmnd *ipr_cmd;
8944 struct ipr_ioa_cfg *ioa_cfg;
8945 unsigned long flags = 0;
8947 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8948 return NOTIFY_DONE;
8950 spin_lock(&ipr_driver_lock);
8952 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8953 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8954 if (!ioa_cfg->allow_cmds) {
8955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8956 continue;
8959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8960 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8961 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8962 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8963 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8965 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8968 spin_unlock(&ipr_driver_lock);
8970 return NOTIFY_OK;
8973 static struct notifier_block ipr_notifier = {
8974 .notifier_call = ipr_halt,
8978 * ipr_init - Module entry point
8980 * Return value:
8981 * 0 on success / negative value on failure
8983 static int __init ipr_init(void)
8985 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8986 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8988 register_reboot_notifier(&ipr_notifier);
8989 return pci_register_driver(&ipr_driver);
8993 * ipr_exit - Module unload
8995 * Module unload entry point.
8997 * Return value:
8998 * none
9000 static void __exit ipr_exit(void)
9002 unregister_reboot_notifier(&ipr_notifier);
9003 pci_unregister_driver(&ipr_driver);
9006 module_init(ipr_init);
9007 module_exit(ipr_exit);