/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, and Obsidian */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
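
/*
 * Load-time usage example (values are arbitrary and for illustration
 * only): the parameters registered above can be supplied to modprobe,
 * e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * Since all are registered with permissions of 0, they are not exposed
 * under /sys/module/ipr/parameters after loading.
 */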

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
210 "No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
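
/*
 * The trailing readl() of the sense register in
 * ipr_mask_and_clear_interrupts() above flushes posted PCI writes:
 * reading back from the adapter guarantees the mask and clear writes
 * have actually reached the hardware before the function returns.
 */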

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
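
/*
 * The mb() in ipr_do_req() above orders the IOARCB setup against the
 * IOARRIN write: the barrier ensures the command block is globally
 * visible in memory before the adapter is handed its bus address and
 * may start fetching it via DMA.
 */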

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
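
/*
 * ipr_send_blocking_cmd() is entered with the host lock held; it drops
 * the lock only across wait_for_completion() and reacquires it before
 * returning, so the caller sees the lock in the same state but must
 * tolerate adapter state having changed while it slept.
 */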

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
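
/*
 * ipr_log_hex_data() prints sixteen bytes per line keyed by byte
 * offset; for example, a 32-byte buffer produces two lines of the
 * form (values illustrative only):
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */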

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			   "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is dead and the reload cannot succeed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
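
/*
 * The polling loop above backs off exponentially: delay doubles on
 * each pass (1us, 2us, 4us, ...), so the total busy-wait is bounded by
 * roughly twice max_delay microseconds before giving up.
 */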

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
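
/*
 * ipr_sdt_copy() gathers the dump one page at a time: pages are
 * allocated GFP_ATOMIC and remembered in ioa_dump->ioa_data[], the
 * host lock is held only around each ldump section read, and the
 * schedule() call between chunks keeps a long dump from monopolizing
 * the CPU.
 */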

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
1869 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1870 * @ioa_cfg: ioa config struct
1871 * @dump: dump struct
1873 * Return value:
1874 * nothing
1876 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1878 unsigned long start_addr, sdt_word;
1879 unsigned long lock_flags = 0;
1880 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1881 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1882 u32 num_entries, start_off, end_off;
1883 u32 bytes_to_copy, bytes_copied, rc;
1884 struct ipr_sdt *sdt;
1885 int i;
1887 ENTER;
1889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1891 if (ioa_cfg->sdt_state != GET_DUMP) {
1892 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1893 return;
1896 start_addr = readl(ioa_cfg->ioa_mailbox);
1898 if (!ipr_sdt_is_fmt2(start_addr)) {
1899 dev_err(&ioa_cfg->pdev->dev,
1900 "Invalid dump table format: %lx\n", start_addr);
1901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1902 return;
1905 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1907 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1909 /* Initialize the overall dump header */
1910 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1911 driver_dump->hdr.num_entries = 1;
1912 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1913 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1914 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1915 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1917 ipr_dump_version_data(ioa_cfg, driver_dump);
1918 ipr_dump_location_data(ioa_cfg, driver_dump);
1919 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1920 ipr_dump_trace_data(ioa_cfg, driver_dump);
1922 /* Update dump_header */
1923 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1925 /* IOA Dump entry */
1926 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1927 ioa_dump->format = IPR_SDT_FMT2;
1928 ioa_dump->hdr.len = 0;
1929 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1930 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1932 /* The first entries in the sdt are actually a list of dump addresses
1933 and lengths used to gather the real dump data. sdt points to the
1934 IOA-generated dump table; dump data will be extracted based on the
1935 entries in this table */
1936 sdt = &ioa_dump->sdt;
1938 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1939 sizeof(struct ipr_sdt) / sizeof(__be32));
1941 /* Smart Dump table is ready to use and the first entry is valid */
1942 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1943 dev_err(&ioa_cfg->pdev->dev,
1944 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1945 rc, be32_to_cpu(sdt->hdr.state));
1946 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1947 ioa_cfg->sdt_state = DUMP_OBTAINED;
1948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1949 return;
1952 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1954 if (num_entries > IPR_NUM_SDT_ENTRIES)
1955 num_entries = IPR_NUM_SDT_ENTRIES;
1957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1959 for (i = 0; i < num_entries; i++) {
1960 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1961 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1962 break;
1965 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1966 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1967 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1968 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1970 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1971 bytes_to_copy = end_off - start_off;
1972 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1973 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1974 continue;
1977 /* Copy data from adapter to driver buffers */
1978 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1979 bytes_to_copy);
1981 ioa_dump->hdr.len += bytes_copied;
1983 if (bytes_copied != bytes_to_copy) {
1984 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1985 break;
1991 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1993 /* Update dump_header */
1994 driver_dump->hdr.len += ioa_dump->hdr.len;
1995 wmb();
1996 ioa_cfg->sdt_state = DUMP_OBTAINED;
1997 LEAVE;
2000 #else
2001 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2002 #endif
2005 * ipr_release_dump - Free adapter dump memory
2006 * @kref: kref struct
2008 * Return value:
2009 * nothing
2011 static void ipr_release_dump(struct kref *kref)
2013 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2014 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2015 unsigned long lock_flags = 0;
2016 int i;
2018 ENTER;
2019 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2020 ioa_cfg->dump = NULL;
2021 ioa_cfg->sdt_state = INACTIVE;
2022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2024 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2025 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2027 kfree(dump);
2028 LEAVE;
2032 * ipr_worker_thread - Worker thread
2033 * @data: ioa config struct
2035 * Called at task level from a work thread. This function takes care
2036 * of adding and removing devices from the mid-layer as configuration
2037 * changes are detected by the adapter.
2039 * Return value:
2040 * nothing
2042 static void ipr_worker_thread(void *data)
2044 unsigned long lock_flags;
2045 struct ipr_resource_entry *res;
2046 struct scsi_device *sdev;
2047 struct ipr_dump *dump;
2048 struct ipr_ioa_cfg *ioa_cfg = data;
2049 u8 bus, target, lun;
2050 int did_work;
2052 ENTER;
2053 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055 if (ioa_cfg->sdt_state == GET_DUMP) {
2056 dump = ioa_cfg->dump;
2057 if (!dump) {
2058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2059 return;
2061 kref_get(&dump->kref);
2062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2063 ipr_get_ioa_dump(ioa_cfg, dump);
2064 kref_put(&dump->kref, ipr_release_dump);
2066 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2067 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2068 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2070 return;
2073 restart:
2074 do {
2075 did_work = 0;
2076 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2078 return;
2081 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2082 if (res->del_from_ml && res->sdev) {
2083 did_work = 1;
2084 sdev = res->sdev;
2085 if (!scsi_device_get(sdev)) {
2086 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2088 scsi_remove_device(sdev);
2089 scsi_device_put(sdev);
2090 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 break;
2095 } while(did_work);
2097 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2098 if (res->add_to_ml) {
2099 bus = res->cfgte.res_addr.bus;
2100 target = res->cfgte.res_addr.target;
2101 lun = res->cfgte.res_addr.lun;
2102 res->add_to_ml = 0;
2103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2104 scsi_add_device(ioa_cfg->host, bus, target, lun);
2105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
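/*
 * host_lock was dropped around scsi_add_device(), so the resource
 * list may have changed underneath us - rescan from the top rather
 * than trusting the list iterator.
 */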
2106 goto restart;
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2112 LEAVE;
2115 #ifdef CONFIG_SCSI_IPR_TRACE
2117 * ipr_read_trace - Dump the adapter trace
2118 * @kobj: kobject struct
2119 * @buf: buffer
2120 * @off: offset
2121 * @count: buffer size
2123 * Return value:
2124 * number of bytes copied to buffer
2126 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2127 loff_t off, size_t count)
2129 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2130 struct Scsi_Host *shost = class_to_shost(cdev);
2131 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2132 unsigned long lock_flags = 0;
2133 int size = IPR_TRACE_SIZE;
2134 char *src = (char *)ioa_cfg->trace;
2136 if (off > size)
2137 return 0;
2138 if (off + count > size) {
2139 size -= off;
2140 count = size;
2143 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2144 memcpy(buf, &src[off], count);
2145 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2146 return count;
2149 static struct bin_attribute ipr_trace_attr = {
2150 .attr = {
2151 .name = "trace",
2152 .mode = S_IRUGO,
2154 .size = 0,
2155 .read = ipr_read_trace,
2157 #endif
2159 static const struct {
2160 enum ipr_cache_state state;
2161 char *name;
2162 } cache_state [] = {
2163 { CACHE_NONE, "none" },
2164 { CACHE_DISABLED, "disabled" },
2165 { CACHE_ENABLED, "enabled" }
2169 * ipr_show_write_caching - Show the write caching attribute
2170 * @class_dev: class device struct
2171 * @buf: buffer
2173 * Return value:
2174 * number of bytes printed to buffer
2176 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2178 struct Scsi_Host *shost = class_to_shost(class_dev);
2179 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2180 unsigned long lock_flags = 0;
2181 int i, len = 0;
2183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2184 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2185 if (cache_state[i].state == ioa_cfg->cache_state) {
2186 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2187 break;
2190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2191 return len;
2196 * ipr_store_write_caching - Enable/disable adapter write cache
2197 * @class_dev: class_device struct
2198 * @buf: buffer
2199 * @count: buffer size
2201 * This function will enable/disable adapter write cache.
2203 * Return value:
2204 * count on success / other on failure
2206 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2207 const char *buf, size_t count)
2209 struct Scsi_Host *shost = class_to_shost(class_dev);
2210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2211 unsigned long lock_flags = 0;
2212 enum ipr_cache_state new_state = CACHE_INVALID;
2213 int i;
2215 if (!capable(CAP_SYS_ADMIN))
2216 return -EACCES;
2217 if (ioa_cfg->cache_state == CACHE_NONE)
2218 return -EINVAL;
2220 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2221 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2222 new_state = cache_state[i].state;
2223 break;
2227 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2228 return -EINVAL;
2230 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2231 if (ioa_cfg->cache_state == new_state) {
2232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2233 return count;
2236 ioa_cfg->cache_state = new_state;
2237 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2238 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2239 if (!ioa_cfg->in_reset_reload)
2240 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2242 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2244 return count;
2247 static struct class_device_attribute ipr_ioa_cache_attr = {
2248 .attr = {
2249 .name = "write_cache",
2250 .mode = S_IRUGO | S_IWUSR,
2252 .show = ipr_show_write_caching,
2253 .store = ipr_store_write_caching
2257 * ipr_show_fw_version - Show the firmware version
2258 * @class_dev: class device struct
2259 * @buf: buffer
2261 * Return value:
2262 * number of bytes printed to buffer
2264 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2266 struct Scsi_Host *shost = class_to_shost(class_dev);
2267 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2268 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2269 unsigned long lock_flags = 0;
2270 int len;
2272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2273 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2274 ucode_vpd->major_release, ucode_vpd->card_type,
2275 ucode_vpd->minor_release[0],
2276 ucode_vpd->minor_release[1]);
2277 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2278 return len;
2281 static struct class_device_attribute ipr_fw_version_attr = {
2282 .attr = {
2283 .name = "fw_version",
2284 .mode = S_IRUGO,
2286 .show = ipr_show_fw_version,
2290 * ipr_show_log_level - Show the adapter's error logging level
2291 * @class_dev: class device struct
2292 * @buf: buffer
2294 * Return value:
2295 * number of bytes printed to buffer
2297 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2299 struct Scsi_Host *shost = class_to_shost(class_dev);
2300 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2301 unsigned long lock_flags = 0;
2302 int len;
2304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2305 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2307 return len;
2311 * ipr_store_log_level - Change the adapter's error logging level
2312 * @class_dev: class device struct
2313 * @buf: buffer
2315 * Return value:
2316 * number of bytes consumed from the buffer
2318 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2319 const char *buf, size_t count)
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2326 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2328 return strlen(buf);
2331 static struct class_device_attribute ipr_log_level_attr = {
2332 .attr = {
2333 .name = "log_level",
2334 .mode = S_IRUGO | S_IWUSR,
2336 .show = ipr_show_log_level,
2337 .store = ipr_store_log_level
2341 * ipr_store_diagnostics - IOA Diagnostics interface
2342 * @class_dev: class_device struct
2343 * @buf: buffer
2344 * @count: buffer size
2346 * This function will reset the adapter and wait a reasonable
2347 * amount of time for any errors that the adapter might log.
2349 * Return value:
2350 * count on success / other on failure
2352 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2353 const char *buf, size_t count)
2355 struct Scsi_Host *shost = class_to_shost(class_dev);
2356 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2357 unsigned long lock_flags = 0;
2358 int rc = count;
2360 if (!capable(CAP_SYS_ADMIN))
2361 return -EACCES;
2363 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2364 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2365 ioa_cfg->errors_logged = 0;
2366 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2368 if (ioa_cfg->in_reset_reload) {
2369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2370 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2372 /* Wait for a second for any errors to be logged */
2373 msleep(1000);
2374 } else {
2375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2376 return -EIO;
2379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2380 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2381 rc = -EIO;
2382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2384 return rc;
2387 static struct class_device_attribute ipr_diagnostics_attr = {
2388 .attr = {
2389 .name = "run_diagnostics",
2390 .mode = S_IWUSR,
2392 .store = ipr_store_diagnostics
2396 * ipr_show_adapter_state - Show the adapter's state
2397 * @class_dev: class device struct
2398 * @buf: buffer
2400 * Return value:
2401 * number of bytes printed to buffer
2403 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2405 struct Scsi_Host *shost = class_to_shost(class_dev);
2406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2407 unsigned long lock_flags = 0;
2408 int len;
2410 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2411 if (ioa_cfg->ioa_is_dead)
2412 len = snprintf(buf, PAGE_SIZE, "offline\n");
2413 else
2414 len = snprintf(buf, PAGE_SIZE, "online\n");
2415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2416 return len;
2420 * ipr_store_adapter_state - Change adapter state
2421 * @class_dev: class_device struct
2422 * @buf: buffer
2423 * @count: buffer size
2425 * This function will change the adapter's state.
2427 * Return value:
2428 * count on success / other on failure
2430 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2431 const char *buf, size_t count)
2433 struct Scsi_Host *shost = class_to_shost(class_dev);
2434 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2435 unsigned long lock_flags;
2436 int result = count;
2438 if (!capable(CAP_SYS_ADMIN))
2439 return -EACCES;
2441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2442 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2443 ioa_cfg->ioa_is_dead = 0;
2444 ioa_cfg->reset_retries = 0;
2445 ioa_cfg->in_ioa_bringdown = 0;
2446 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2448 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2449 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2451 return result;
2454 static struct class_device_attribute ipr_ioa_state_attr = {
2455 .attr = {
2456 .name = "state",
2457 .mode = S_IRUGO | S_IWUSR,
2459 .show = ipr_show_adapter_state,
2460 .store = ipr_store_adapter_state
2464 * ipr_store_reset_adapter - Reset the adapter
2465 * @class_dev: class_device struct
2466 * @buf: buffer
2467 * @count: buffer size
2469 * This function will reset the adapter.
2471 * Return value:
2472 * count on success / other on failure
2474 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2475 const char *buf, size_t count)
2477 struct Scsi_Host *shost = class_to_shost(class_dev);
2478 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2479 unsigned long lock_flags;
2480 int result = count;
2482 if (!capable(CAP_SYS_ADMIN))
2483 return -EACCES;
2485 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2486 if (!ioa_cfg->in_reset_reload)
2487 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2491 return result;
2494 static struct class_device_attribute ipr_ioa_reset_attr = {
2495 .attr = {
2496 .name = "reset_host",
2497 .mode = S_IWUSR,
2499 .store = ipr_store_reset_adapter
2503 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2504 * @buf_len: buffer length
2506 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2507 * list to use for microcode download
2509 * Return value:
2510 * pointer to sglist / NULL on failure
2512 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2514 int sg_size, order, bsize_elem, num_elem, i, j;
2515 struct ipr_sglist *sglist;
2516 struct scatterlist *scatterlist;
2517 struct page *page;
2519 /* Get the minimum size per scatter/gather element */
2520 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
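/*
 * Dividing by IPR_MAX_SGLIST - 1 (not IPR_MAX_SGLIST) leaves
 * headroom so that, after the element size is rounded up to a
 * power-of-two page order below, the resulting element count
 * still fits within IPR_MAX_SGLIST.
 */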
2522 /* Get the actual size per element */
2523 order = get_order(sg_size);
2525 /* Determine the actual number of bytes per element */
2526 bsize_elem = PAGE_SIZE * (1 << order);
2528 /* Determine the actual number of sg entries needed */
2529 if (buf_len % bsize_elem)
2530 num_elem = (buf_len / bsize_elem) + 1;
2531 else
2532 num_elem = buf_len / bsize_elem;
2534 /* Allocate a scatter/gather list for the DMA */
2535 sglist = kzalloc(sizeof(struct ipr_sglist) +
2536 (sizeof(struct scatterlist) * (num_elem - 1)),
2537 GFP_KERNEL);
2539 if (sglist == NULL) {
2540 ipr_trace;
2541 return NULL;
2544 scatterlist = sglist->scatterlist;
2546 sglist->order = order;
2547 sglist->num_sg = num_elem;
2549 /* Allocate a bunch of sg elements */
2550 for (i = 0; i < num_elem; i++) {
2551 page = alloc_pages(GFP_KERNEL, order);
2552 if (!page) {
2553 ipr_trace;
2555 /* Free up what we already allocated */
2556 for (j = i - 1; j >= 0; j--)
2557 __free_pages(scatterlist[j].page, order);
2558 kfree(sglist);
2559 return NULL;
2562 scatterlist[i].page = page;
2565 return sglist;
2569 * ipr_free_ucode_buffer - Frees a microcode download buffer
2570 * @p_dnld: scatter/gather list pointer
2572 * Free a DMA'able ucode download buffer previously allocated with
2573 * ipr_alloc_ucode_buffer
2575 * Return value:
2576 * nothing
2578 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2580 int i;
2582 for (i = 0; i < sglist->num_sg; i++)
2583 __free_pages(sglist->scatterlist[i].page, sglist->order);
2585 kfree(sglist);
2589 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2590 * @sglist: scatter/gather list pointer
2591 * @buffer: buffer pointer
2592 * @len: buffer length
2594 * Copy a microcode image from a user buffer into a buffer allocated by
2595 * ipr_alloc_ucode_buffer
2597 * Return value:
2598 * 0 on success / other on failure
2600 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2601 u8 *buffer, u32 len)
2603 int bsize_elem, i, result = 0;
2604 struct scatterlist *scatterlist;
2605 void *kaddr;
2607 /* Determine the actual number of bytes per element */
2608 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2610 scatterlist = sglist->scatterlist;
2612 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2613 kaddr = kmap(scatterlist[i].page);
2614 memcpy(kaddr, buffer, bsize_elem);
2615 kunmap(scatterlist[i].page);
2617 scatterlist[i].length = bsize_elem;
2619 if (result != 0) {
2620 ipr_trace;
2621 return result;
2625 if (len % bsize_elem) {
2626 kaddr = kmap(scatterlist[i].page);
2627 memcpy(kaddr, buffer, len % bsize_elem);
2628 kunmap(scatterlist[i].page);
2630 scatterlist[i].length = len % bsize_elem;
2633 sglist->buffer_len = len;
2634 return result;
2638 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2639 * @ipr_cmd: ipr command struct
2640 * @sglist: scatter/gather list
2642 * Builds a microcode download IOA data list (IOADL).
2645 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2646 struct ipr_sglist *sglist)
2648 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2649 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2650 struct scatterlist *scatterlist = sglist->scatterlist;
2651 int i;
2653 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2655 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2656 ioarcb->write_ioadl_len =
2657 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2659 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2660 ioadl[i].flags_and_data_len =
2661 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2662 ioadl[i].address =
2663 cpu_to_be32(sg_dma_address(&scatterlist[i]));
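/* After the loop, i == dma_use_sg, so ioadl[i-1] is the final
 * descriptor - flag it so the adapter knows where the list ends.
 */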
2666 ioadl[i-1].flags_and_data_len |=
2667 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2671 * ipr_update_ioa_ucode - Update IOA's microcode
2672 * @ioa_cfg: ioa config struct
2673 * @sglist: scatter/gather list
2675 * Initiate an adapter reset to update the IOA's microcode
2677 * Return value:
2678 * 0 on success / -EIO on failure
2680 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2681 struct ipr_sglist *sglist)
2683 unsigned long lock_flags;
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687 if (ioa_cfg->ucode_sglist) {
2688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2689 dev_err(&ioa_cfg->pdev->dev,
2690 "Microcode download already in progress\n");
2691 return -EIO;
2694 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2695 sglist->num_sg, DMA_TO_DEVICE);
2697 if (!sglist->num_dma_sg) {
2698 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2699 dev_err(&ioa_cfg->pdev->dev,
2700 "Failed to map microcode download buffer!\n");
2701 return -EIO;
2704 ioa_cfg->ucode_sglist = sglist;
2705 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2706 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2707 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2710 ioa_cfg->ucode_sglist = NULL;
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712 return 0;
2716 * ipr_store_update_fw - Update the firmware on the adapter
2717 * @class_dev: class_device struct
2718 * @buf: buffer
2719 * @count: buffer size
2721 * This function will update the firmware on the adapter.
2723 * Return value:
2724 * count on success / other on failure
2726 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2727 const char *buf, size_t count)
2729 struct Scsi_Host *shost = class_to_shost(class_dev);
2730 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2731 struct ipr_ucode_image_header *image_hdr;
2732 const struct firmware *fw_entry;
2733 struct ipr_sglist *sglist;
2734 char fname[100];
2735 char *src;
2736 int len, result, dnld_size;
2738 if (!capable(CAP_SYS_ADMIN))
2739 return -EACCES;
2741 len = snprintf(fname, 99, "%s", buf);
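/* sysfs store buffers typically arrive newline terminated (e.g.
 * from echo) - overwrite the trailing character with a NUL.
 */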
2742 fname[len-1] = '\0';
2744 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2745 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2746 return -EIO;
2749 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2751 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2752 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2753 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2754 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2755 release_firmware(fw_entry);
2756 return -EINVAL;
2759 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2760 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2761 sglist = ipr_alloc_ucode_buffer(dnld_size);
2763 if (!sglist) {
2764 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2765 release_firmware(fw_entry);
2766 return -ENOMEM;
2769 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2771 if (result) {
2772 dev_err(&ioa_cfg->pdev->dev,
2773 "Microcode buffer copy to DMA buffer failed\n");
2774 goto out;
2777 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2779 if (!result)
2780 result = count;
2781 out:
2782 ipr_free_ucode_buffer(sglist);
2783 release_firmware(fw_entry);
2784 return result;
2787 static struct class_device_attribute ipr_update_fw_attr = {
2788 .attr = {
2789 .name = "update_fw",
2790 .mode = S_IWUSR,
2792 .store = ipr_store_update_fw
2795 static struct class_device_attribute *ipr_ioa_attrs[] = {
2796 &ipr_fw_version_attr,
2797 &ipr_log_level_attr,
2798 &ipr_diagnostics_attr,
2799 &ipr_ioa_state_attr,
2800 &ipr_ioa_reset_attr,
2801 &ipr_update_fw_attr,
2802 &ipr_ioa_cache_attr,
2803 NULL,
2806 #ifdef CONFIG_SCSI_IPR_DUMP
2808 * ipr_read_dump - Dump the adapter
2809 * @kobj: kobject struct
2810 * @buf: buffer
2811 * @off: offset
2812 * @count: buffer size
2814 * Return value:
2815 * number of bytes copied to buffer
2817 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2818 loff_t off, size_t count)
2820 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2821 struct Scsi_Host *shost = class_to_shost(cdev);
2822 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2823 struct ipr_dump *dump;
2824 unsigned long lock_flags = 0;
2825 char *src;
2826 int len;
2827 size_t rc = count;
2829 if (!capable(CAP_SYS_ADMIN))
2830 return -EACCES;
2832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2833 dump = ioa_cfg->dump;
2835 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2836 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2837 return 0;
2839 kref_get(&dump->kref);
2840 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2842 if (off > dump->driver_dump.hdr.len) {
2843 kref_put(&dump->kref, ipr_release_dump);
2844 return 0;
2847 if (off + count > dump->driver_dump.hdr.len) {
2848 count = dump->driver_dump.hdr.len - off;
2849 rc = count;
2852 if (count && off < sizeof(dump->driver_dump)) {
2853 if (off + count > sizeof(dump->driver_dump))
2854 len = sizeof(dump->driver_dump) - off;
2855 else
2856 len = count;
2857 src = (u8 *)&dump->driver_dump + off;
2858 memcpy(buf, src, len);
2859 buf += len;
2860 off += len;
2861 count -= len;
2864 off -= sizeof(dump->driver_dump);
2866 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2867 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2868 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2869 else
2870 len = count;
2871 src = (u8 *)&dump->ioa_dump + off;
2872 memcpy(buf, src, len);
2873 buf += len;
2874 off += len;
2875 count -= len;
2878 off -= offsetof(struct ipr_ioa_dump, ioa_data);
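/*
 * Whatever offset remains now indexes the IOA dump data itself,
 * which is stored as an array of discontiguous pages - walk it
 * page by page, copying at most up to the next page boundary on
 * each iteration.
 */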
2880 while (count) {
2881 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2882 len = PAGE_ALIGN(off) - off;
2883 else
2884 len = count;
2885 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2886 src += off & ~PAGE_MASK;
2887 memcpy(buf, src, len);
2888 buf += len;
2889 off += len;
2890 count -= len;
2893 kref_put(&dump->kref, ipr_release_dump);
2894 return rc;
2898 * ipr_alloc_dump - Prepare for adapter dump
2899 * @ioa_cfg: ioa config struct
2901 * Return value:
2902 * 0 on success / other on failure
2904 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2906 struct ipr_dump *dump;
2907 unsigned long lock_flags = 0;
2909 ENTER;
2910 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2912 if (!dump) {
2913 ipr_err("Dump memory allocation failed\n");
2914 return -ENOMEM;
2917 kref_init(&dump->kref);
2918 dump->ioa_cfg = ioa_cfg;
2920 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2922 if (INACTIVE != ioa_cfg->sdt_state) {
2923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2924 kfree(dump);
2925 return 0;
2928 ioa_cfg->dump = dump;
2929 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2930 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2931 ioa_cfg->dump_taken = 1;
2932 schedule_work(&ioa_cfg->work_q);
2934 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2936 LEAVE;
2937 return 0;
2941 * ipr_free_dump - Free adapter dump memory
2942 * @ioa_cfg: ioa config struct
2944 * Return value:
2945 * 0 on success / other on failure
2947 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2949 struct ipr_dump *dump;
2950 unsigned long lock_flags = 0;
2952 ENTER;
2954 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2955 dump = ioa_cfg->dump;
2956 if (!dump) {
2957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2958 return 0;
2961 ioa_cfg->dump = NULL;
2962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964 kref_put(&dump->kref, ipr_release_dump);
2966 LEAVE;
2967 return 0;
2971 * ipr_write_dump - Setup dump state of adapter
2972 * @kobj: kobject struct
2973 * @buf: buffer
2974 * @off: offset
2975 * @count: buffer size
2977 * Return value:
2978 * count on success / other on failure
2980 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2981 loff_t off, size_t count)
2983 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2984 struct Scsi_Host *shost = class_to_shost(cdev);
2985 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2986 int rc;
2988 if (!capable(CAP_SYS_ADMIN))
2989 return -EACCES;
2991 if (buf[0] == '1')
2992 rc = ipr_alloc_dump(ioa_cfg);
2993 else if (buf[0] == '0')
2994 rc = ipr_free_dump(ioa_cfg);
2995 else
2996 return -EINVAL;
2998 if (rc)
2999 return rc;
3000 else
3001 return count;
3004 static struct bin_attribute ipr_dump_attr = {
3005 .attr = {
3006 .name = "dump",
3007 .mode = S_IRUSR | S_IWUSR,
3009 .size = 0,
3010 .read = ipr_read_dump,
3011 .write = ipr_write_dump
3013 #else
3014 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3015 #endif
3018 * ipr_change_queue_depth - Change the device's queue depth
3019 * @sdev: scsi device struct
3020 * @qdepth: depth to set
3022 * Return value:
3023 * actual depth set
3025 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3027 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3028 return sdev->queue_depth;
3032 * ipr_change_queue_type - Change the device's queue type
3033 * @sdev: scsi device struct
3034 * @tag_type: type of tags to use
3036 * Return value:
3037 * actual queue type set
3039 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3041 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3042 struct ipr_resource_entry *res;
3043 unsigned long lock_flags = 0;
3045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3046 res = (struct ipr_resource_entry *)sdev->hostdata;
3048 if (res) {
3049 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3050 /*
3051 * We don't bother quiescing the device here since the
3052 * adapter firmware does it for us.
3053 */
3054 scsi_set_tag_type(sdev, tag_type);
3056 if (tag_type)
3057 scsi_activate_tcq(sdev, sdev->queue_depth);
3058 else
3059 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3060 } else
3061 tag_type = 0;
3062 } else
3063 tag_type = 0;
3065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 return tag_type;
3070 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3071 * @dev: device struct
3072 * @buf: buffer
3074 * Return value:
3075 * number of bytes printed to buffer
3077 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3079 struct scsi_device *sdev = to_scsi_device(dev);
3080 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3081 struct ipr_resource_entry *res;
3082 unsigned long lock_flags = 0;
3083 ssize_t len = -ENXIO;
3085 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3086 res = (struct ipr_resource_entry *)sdev->hostdata;
3087 if (res)
3088 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3090 return len;
3093 static struct device_attribute ipr_adapter_handle_attr = {
3094 .attr = {
3095 .name = "adapter_handle",
3096 .mode = S_IRUSR,
3098 .show = ipr_show_adapter_handle
3101 static struct device_attribute *ipr_dev_attrs[] = {
3102 &ipr_adapter_handle_attr,
3103 NULL,
3107 * ipr_biosparam - Return the HSC mapping
3108 * @sdev: scsi device struct
3109 * @block_device: block device pointer
3110 * @capacity: capacity of the device
3111 * @parm: Array containing returned HSC values.
3113 * This function generates the HSC parms that fdisk uses.
3114 * We want to make sure we return something that places partitions
3115 * on 4k boundaries for best performance with the IOA.
3117 * Return value:
3118 * 0 on success
3120 static int ipr_biosparam(struct scsi_device *sdev,
3121 struct block_device *block_device,
3122 sector_t capacity, int *parm)
3124 int heads, sectors;
3125 sector_t cylinders;
3127 heads = 128;
3128 sectors = 32;
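/* 128 heads * 32 sectors/track * 512-byte blocks = 2MB cylinders,
 * so cylinder-aligned partitions land on 4k boundaries.
 */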
3130 cylinders = capacity;
3131 sector_div(cylinders, (128 * 32));
3133 /* return result */
3134 parm[0] = heads;
3135 parm[1] = sectors;
3136 parm[2] = cylinders;
3138 return 0;
3142 * ipr_slave_destroy - Unconfigure a SCSI device
3143 * @sdev: scsi device struct
3145 * Return value:
3146 * nothing
3148 static void ipr_slave_destroy(struct scsi_device *sdev)
3150 struct ipr_resource_entry *res;
3151 struct ipr_ioa_cfg *ioa_cfg;
3152 unsigned long lock_flags = 0;
3154 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3157 res = (struct ipr_resource_entry *) sdev->hostdata;
3158 if (res) {
3159 sdev->hostdata = NULL;
3160 res->sdev = NULL;
3162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3166 * ipr_slave_configure - Configure a SCSI device
3167 * @sdev: scsi device struct
3169 * This function configures the specified scsi device.
3171 * Return value:
3172 * 0 on success
3174 static int ipr_slave_configure(struct scsi_device *sdev)
3176 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3177 struct ipr_resource_entry *res;
3178 unsigned long lock_flags = 0;
3180 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3181 res = sdev->hostdata;
3182 if (res) {
3183 if (ipr_is_af_dasd_device(res))
3184 sdev->type = TYPE_RAID;
3185 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3186 sdev->scsi_level = 4;
3187 sdev->no_uld_attach = 1;
3189 if (ipr_is_vset_device(res)) {
3190 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3191 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3193 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3194 sdev->allow_restart = 1;
3195 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 return 0;
3202 * ipr_slave_alloc - Prepare for commands to a device.
3203 * @sdev: scsi device struct
3205 * This function saves a pointer to the resource entry
3206 * in the scsi device struct if the device exists. We
3207 * can then use this pointer in ipr_queuecommand when
3208 * handling new commands.
3210 * Return value:
3211 * 0 on success / -ENXIO if device does not exist
3213 static int ipr_slave_alloc(struct scsi_device *sdev)
3215 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3216 struct ipr_resource_entry *res;
3217 unsigned long lock_flags;
3218 int rc = -ENXIO;
3220 sdev->hostdata = NULL;
3222 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3225 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3226 (res->cfgte.res_addr.target == sdev->id) &&
3227 (res->cfgte.res_addr.lun == sdev->lun)) {
3228 res->sdev = sdev;
3229 res->add_to_ml = 0;
3230 res->in_erp = 0;
3231 sdev->hostdata = res;
3232 if (!ipr_is_naca_model(res))
3233 res->needs_sync_complete = 1;
3234 rc = 0;
3235 break;
3239 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241 return rc;
3245 * ipr_eh_host_reset - Reset the host adapter
3246 * @scsi_cmd: scsi command struct
3248 * Return value:
3249 * SUCCESS / FAILED
3251 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3253 struct ipr_ioa_cfg *ioa_cfg;
3254 int rc;
3256 ENTER;
3257 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3259 dev_err(&ioa_cfg->pdev->dev,
3260 "Adapter being reset as a result of error recovery.\n");
3262 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3263 ioa_cfg->sdt_state = GET_DUMP;
3265 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3267 LEAVE;
3268 return rc;
3271 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3273 int rc;
3275 spin_lock_irq(cmd->device->host->host_lock);
3276 rc = __ipr_eh_host_reset(cmd);
3277 spin_unlock_irq(cmd->device->host->host_lock);
3279 return rc;
3283 * ipr_device_reset - Reset the device
3284 * @ioa_cfg: ioa config struct
3285 * @res: resource entry struct
3287 * This function issues a device reset to the affected device.
3288 * If the device is a SCSI device, a LUN reset will be sent
3289 * to the device first. If that does not work, a target reset
3290 * will be sent.
3292 * Return value:
3293 * 0 on success / non-zero on failure
3295 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3296 struct ipr_resource_entry *res)
3298 struct ipr_cmnd *ipr_cmd;
3299 struct ipr_ioarcb *ioarcb;
3300 struct ipr_cmd_pkt *cmd_pkt;
3301 u32 ioasc;
3303 ENTER;
3304 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3305 ioarcb = &ipr_cmd->ioarcb;
3306 cmd_pkt = &ioarcb->cmd_pkt;
3308 ioarcb->res_handle = res->cfgte.res_handle;
3309 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3310 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3312 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3313 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3314 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3316 LEAVE;
3317 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3321 * ipr_eh_dev_reset - Reset the device
3322 * @scsi_cmd: scsi command struct
3324 * This function issues a device reset to the affected device.
3325 * A LUN reset will be sent to the device first. If that does
3326 * not work, a target reset will be sent.
3328 * Return value:
3329 * SUCCESS / FAILED
3331 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3333 struct ipr_cmnd *ipr_cmd;
3334 struct ipr_ioa_cfg *ioa_cfg;
3335 struct ipr_resource_entry *res;
3336 int rc;
3338 ENTER;
3339 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3340 res = scsi_cmd->device->hostdata;
3342 if (!res)
3343 return FAILED;
3345 /*
3346 * If we are currently going through reset/reload, return failed. This will force the
3347 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3348 * reset to complete.
3349 */
3350 if (ioa_cfg->in_reset_reload)
3351 return FAILED;
3352 if (ioa_cfg->ioa_is_dead)
3353 return FAILED;
3355 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3356 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3357 if (ipr_cmd->scsi_cmd)
3358 ipr_cmd->done = ipr_scsi_eh_done;
3362 res->resetting_device = 1;
3363 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3364 rc = ipr_device_reset(ioa_cfg, res);
3365 res->resetting_device = 0;
3367 LEAVE;
3368 return (rc ? FAILED : SUCCESS);
3371 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3373 int rc;
3375 spin_lock_irq(cmd->device->host->host_lock);
3376 rc = __ipr_eh_dev_reset(cmd);
3377 spin_unlock_irq(cmd->device->host->host_lock);
3379 return rc;
3383 * ipr_bus_reset_done - Op done function for bus reset.
3384 * @ipr_cmd: ipr command struct
3386 * This function is the op done function for a bus reset
3388 * Return value:
3389 * none
3391 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3393 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3394 struct ipr_resource_entry *res;
3396 ENTER;
3397 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3399 sizeof(res->cfgte.res_handle))) {
3400 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3401 break;
3405 /*
3406 * If abort has not completed, indicate the reset has, else call the
3407 * abort's done function to wake the sleeping eh thread.
3408 */
3409 if (ipr_cmd->sibling->sibling)
3410 ipr_cmd->sibling->sibling = NULL;
3411 else
3412 ipr_cmd->sibling->done(ipr_cmd->sibling);
3414 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3415 LEAVE;
3419 * ipr_abort_timeout - An abort task has timed out
3420 * @ipr_cmd: ipr command struct
3422 * This function handles the case where an abort task times out. If this
3423 * happens we issue a bus reset since we have resources tied
3424 * up that must be freed before returning to the midlayer.
3426 * Return value:
3427 * none
3429 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3431 struct ipr_cmnd *reset_cmd;
3432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3433 struct ipr_cmd_pkt *cmd_pkt;
3434 unsigned long lock_flags = 0;
3436 ENTER;
3437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3438 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440 return;
3443 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3444 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3445 ipr_cmd->sibling = reset_cmd;
3446 reset_cmd->sibling = ipr_cmd;
3447 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3448 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3449 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3450 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3451 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3453 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 LEAVE;
3459 * ipr_cancel_op - Cancel specified op
3460 * @scsi_cmd: scsi command struct
3462 * This function cancels the specified op.
3464 * Return value:
3465 * SUCCESS / FAILED
3467 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3469 struct ipr_cmnd *ipr_cmd;
3470 struct ipr_ioa_cfg *ioa_cfg;
3471 struct ipr_resource_entry *res;
3472 struct ipr_cmd_pkt *cmd_pkt;
3473 u32 ioasc;
3474 int op_found = 0;
3476 ENTER;
3477 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3478 res = scsi_cmd->device->hostdata;
3480 /* If we are currently going through reset/reload, return failed.
3481 * This will force the mid-layer to call ipr_eh_host_reset,
3482 * which will then go to sleep and wait for the reset to complete.
3483 */
3484 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3485 return FAILED;
3486 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3487 return FAILED;
3489 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3490 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3491 ipr_cmd->done = ipr_scsi_eh_done;
3492 op_found = 1;
3493 break;
3497 if (!op_found)
3498 return SUCCESS;
3500 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3501 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3502 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3503 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3504 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3505 ipr_cmd->u.sdev = scsi_cmd->device;
3507 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3508 scsi_cmd->cmnd[0]);
3509 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3510 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3512 /*
3513 * If the abort task timed out and we sent a bus reset, we will get
3514 * one of the following responses to the abort.
3515 */
3516 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3517 ioasc = 0;
3518 ipr_trace;
3521 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3522 if (!ipr_is_naca_model(res))
3523 res->needs_sync_complete = 1;
3525 LEAVE;
3526 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3530 * ipr_eh_abort - Abort a single op
3531 * @scsi_cmd: scsi command struct
3533 * Return value:
3534 * SUCCESS / FAILED
3536 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3538 unsigned long flags;
3539 int rc;
3541 ENTER;
3543 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3544 rc = ipr_cancel_op(scsi_cmd);
3545 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3547 LEAVE;
3548 return rc;
3552 * ipr_handle_other_interrupt - Handle "other" interrupts
3553 * @ioa_cfg: ioa config struct
3554 * @int_reg: interrupt register
3556 * Return value:
3557 * IRQ_NONE / IRQ_HANDLED
3559 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3560 volatile u32 int_reg)
3562 irqreturn_t rc = IRQ_HANDLED;
3564 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3565 /* Mask the interrupt */
3566 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3568 /* Clear the interrupt */
3569 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3570 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3572 list_del(&ioa_cfg->reset_cmd->queue);
3573 del_timer(&ioa_cfg->reset_cmd->timer);
3574 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3575 } else {
3576 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3577 ioa_cfg->ioa_unit_checked = 1;
3578 else
3579 dev_err(&ioa_cfg->pdev->dev,
3580 "Permanent IOA failure. 0x%08X\n", int_reg);
3582 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3583 ioa_cfg->sdt_state = GET_DUMP;
3585 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3586 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3589 return rc;
3593 * ipr_isr - Interrupt service routine
3594 * @irq: irq number
3595 * @devp: pointer to ioa config struct
3596 * @regs: pt_regs struct
3598 * Return value:
3599 * IRQ_NONE / IRQ_HANDLED
3601 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3604 unsigned long lock_flags = 0;
3605 volatile u32 int_reg, int_mask_reg;
3606 u32 ioasc;
3607 u16 cmd_index;
3608 struct ipr_cmnd *ipr_cmd;
3609 irqreturn_t rc = IRQ_NONE;
3611 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3613 /* If interrupts are disabled, ignore the interrupt */
3614 if (!ioa_cfg->allow_interrupts) {
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 return IRQ_NONE;
3619 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3620 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3622 /* If the adapter is not signaling an interrupt (e.g. a shared IRQ line), ignore it */
3623 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3624 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3625 return IRQ_NONE;
3628 while (1) {
3629 ipr_cmd = NULL;
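/*
 * Each HRRQ entry carries a toggle bit that the adapter flips on
 * every pass through the circular queue; an entry whose toggle
 * bit matches ours is new, so no consumer index has to be read
 * from the adapter.
 */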
3631 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3632 ioa_cfg->toggle_bit) {
3634 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3635 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3637 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3638 ioa_cfg->errors_logged++;
3639 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3641 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3642 ioa_cfg->sdt_state = GET_DUMP;
3644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646 return IRQ_HANDLED;
3649 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3651 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3653 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3655 list_del(&ipr_cmd->queue);
3656 del_timer(&ipr_cmd->timer);
3657 ipr_cmd->done(ipr_cmd);
3659 rc = IRQ_HANDLED;
3661 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3662 ioa_cfg->hrrq_curr++;
3663 } else {
3664 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3665 ioa_cfg->toggle_bit ^= 1u;
3669 if (ipr_cmd != NULL) {
3670 /* Clear the PCI interrupt */
3671 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3672 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3673 } else
3674 break;
3677 if (unlikely(rc == IRQ_NONE))
3678 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681 return rc;
3685 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3686 * @ioa_cfg: ioa config struct
3687 * @ipr_cmd: ipr command struct
3689 * Return value:
3690 * 0 on success / -1 on failure
3692 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3693 struct ipr_cmnd *ipr_cmd)
3695 int i;
3696 struct scatterlist *sglist;
3697 u32 length;
3698 u32 ioadl_flags = 0;
3699 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3700 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3701 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3703 length = scsi_cmd->request_bufflen;
3705 if (length == 0)
3706 return 0;
3708 if (scsi_cmd->use_sg) {
3709 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3710 scsi_cmd->request_buffer,
3711 scsi_cmd->use_sg,
3712 scsi_cmd->sc_data_direction);
3714 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3715 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3716 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3717 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3718 ioarcb->write_ioadl_len =
3719 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3720 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3721 ioadl_flags = IPR_IOADL_FLAGS_READ;
3722 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3723 ioarcb->read_ioadl_len =
3724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3727 sglist = scsi_cmd->request_buffer;
3729 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3730 ioadl[i].flags_and_data_len =
3731 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3732 ioadl[i].address =
3733 cpu_to_be32(sg_dma_address(&sglist[i]));
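/* Flag the final IOADL descriptor; dma_use_sg of zero here
 * means pci_map_sg failed.
 */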
3736 if (likely(ipr_cmd->dma_use_sg)) {
3737 ioadl[i-1].flags_and_data_len |=
3738 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3739 return 0;
3740 } else
3741 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3742 } else {
3743 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3744 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3745 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3746 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3747 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3748 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3749 ioadl_flags = IPR_IOADL_FLAGS_READ;
3750 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3751 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3754 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3755 scsi_cmd->request_buffer, length,
3756 scsi_cmd->sc_data_direction);
3758 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3759 ipr_cmd->dma_use_sg = 1;
3760 ioadl[0].flags_and_data_len =
3761 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3762 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3763 return 0;
3764 } else
3765 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3768 return -1;
3772 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3773 * @scsi_cmd: scsi command struct
3775 * Return value:
3776 * task attributes
3778 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3780 u8 tag[2];
3781 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3783 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3784 switch (tag[0]) {
3785 case MSG_SIMPLE_TAG:
3786 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3787 break;
3788 case MSG_HEAD_TAG:
3789 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3790 break;
3791 case MSG_ORDERED_TAG:
3792 rc = IPR_FLAGS_LO_ORDERED_TASK;
3793 break;
3797 return rc;
3801 * ipr_erp_done - Process completion of ERP for a device
3802 * @ipr_cmd: ipr command struct
3804 * This function copies the sense buffer into the scsi_cmd
3805 * struct and pushes the scsi_done function.
3807 * Return value:
3808 * nothing
3810 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3812 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3813 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3814 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3815 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3817 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3818 scsi_cmd->result |= (DID_ERROR << 16);
3819 scmd_printk(KERN_ERR, scsi_cmd,
3820 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3821 } else {
3822 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3823 SCSI_SENSE_BUFFERSIZE);
3826 if (res) {
3827 if (!ipr_is_naca_model(res))
3828 res->needs_sync_complete = 1;
3829 res->in_erp = 0;
3831 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3832 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3833 scsi_cmd->scsi_done(scsi_cmd);
3837 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3838 * @ipr_cmd: ipr command struct
3840 * Return value:
3841 * none
3843 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3845 struct ipr_ioarcb *ioarcb;
3846 struct ipr_ioasa *ioasa;
3848 ioarcb = &ipr_cmd->ioarcb;
3849 ioasa = &ipr_cmd->ioasa;
3851 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3852 ioarcb->write_data_transfer_length = 0;
3853 ioarcb->read_data_transfer_length = 0;
3854 ioarcb->write_ioadl_len = 0;
3855 ioarcb->read_ioadl_len = 0;
3856 ioasa->ioasc = 0;
3857 ioasa->residual_data_len = 0;
3861 * ipr_erp_request_sense - Send request sense to a device
3862 * @ipr_cmd: ipr command struct
3864 * This function sends a request sense to a device as a result
3865 * of a check condition.
3867 * Return value:
3868 * nothing
3870 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3872 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3873 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3875 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3876 ipr_erp_done(ipr_cmd);
3877 return;
3880 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3882 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3883 cmd_pkt->cdb[0] = REQUEST_SENSE;
3884 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3885 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3886 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3887 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3889 ipr_cmd->ioadl[0].flags_and_data_len =
3890 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3891 ipr_cmd->ioadl[0].address =
3892 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3894 ipr_cmd->ioarcb.read_ioadl_len =
3895 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3896 ipr_cmd->ioarcb.read_data_transfer_length =
3897 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3899 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3900 IPR_REQUEST_SENSE_TIMEOUT * 2);
3904 * ipr_erp_cancel_all - Send cancel all to a device
3905 * @ipr_cmd: ipr command struct
3907 * This function sends a cancel all to a device to clear the
3908 * queue. If we are running TCQ on the device, QERR is set to 1,
3909 * which means all outstanding ops have been dropped on the floor.
3910 * Cancel all will return them to us.
3912 * Return value:
3913 * nothing
3915 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3917 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3918 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3919 struct ipr_cmd_pkt *cmd_pkt;
3921 res->in_erp = 1;
3923 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3925 if (!scsi_get_tag_type(scsi_cmd->device)) {
3926 ipr_erp_request_sense(ipr_cmd);
3927 return;
3930 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3931 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3932 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3934 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3935 IPR_CANCEL_ALL_TIMEOUT);
3939 * ipr_dump_ioasa - Dump contents of IOASA
3940 * @ioa_cfg: ioa config struct
3941 * @ipr_cmd: ipr command struct
3942 * @res: resource entry struct
3944 * This function is invoked by the interrupt handler when ops
3945 * fail. It will log the IOASA if appropriate. Only called
3946 * for GPDD ops.
3948 * Return value:
3949 * none
3951 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3952 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3954 int i;
3955 u16 data_len;
3956 u32 ioasc;
3957 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3958 __be32 *ioasa_data = (__be32 *)ioasa;
3959 int error_index;
3961 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3963 if (0 == ioasc)
3964 return;
3966 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3967 return;
3969 error_index = ipr_get_error(ioasc);
3971 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3972 /* Don't log an error if the IOA already logged one */
3973 if (ioasa->ilid != 0)
3974 return;
3976 if (ipr_error_table[error_index].log_ioasa == 0)
3977 return;
3980 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
3982 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3983 data_len = sizeof(struct ipr_ioasa);
3984 else
3985 data_len = be16_to_cpu(ioasa->ret_stat_len);
3987 ipr_err("IOASA Dump:\n");
3989 for (i = 0; i < data_len / 4; i += 4) {
3990 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3991 be32_to_cpu(ioasa_data[i]),
3992 be32_to_cpu(ioasa_data[i+1]),
3993 be32_to_cpu(ioasa_data[i+2]),
3994 be32_to_cpu(ioasa_data[i+3]));
3999 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4000 * @ipr_cmd: ipr command struct
4003 * Return value:
4004 * none
4006 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4008 u32 failing_lba;
4009 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4010 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4011 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4012 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4014 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4016 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4017 return;
4019 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4021 if (ipr_is_vset_device(res) &&
4022 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4023 ioasa->u.vset.failing_lba_hi != 0) {
4024 sense_buf[0] = 0x72;
4025 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4026 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4027 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4029 sense_buf[7] = 12;
4030 sense_buf[8] = 0;
4031 sense_buf[9] = 0x0A;
4032 sense_buf[10] = 0x80;
4034 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4036 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4037 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4038 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4039 sense_buf[15] = failing_lba & 0x000000ff;
4041 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4043 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4044 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4045 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4046 sense_buf[19] = failing_lba & 0x000000ff;
4047 } else {
4048 sense_buf[0] = 0x70;
4049 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4050 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4051 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4053 /* Illegal request */
4054 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4055 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4056 sense_buf[7] = 10; /* additional length */
4058 /* IOARCB was in error */
4059 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4060 sense_buf[15] = 0xC0;
4061 else /* Parameter data was invalid */
4062 sense_buf[15] = 0x80;
4064 sense_buf[16] =
4065 ((IPR_FIELD_POINTER_MASK &
4066 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4067 sense_buf[17] =
4068 (IPR_FIELD_POINTER_MASK &
4069 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4070 } else {
4071 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4072 if (ipr_is_vset_device(res))
4073 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4074 else
4075 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4077 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4078 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4079 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4080 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4081 sense_buf[6] = failing_lba & 0x000000ff;
4084 sense_buf[7] = 6; /* additional length */
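/*
 * The open-coded byte shifts above just store 32-bit values MSB-first
 * into the sense buffer. A minimal equivalent helper (a sketch, not
 * part of this driver) would be:
 */
static inline void ipr_store_be32(u8 *buf, u32 val)
{
	__be32 tmp = cpu_to_be32(val);

	/* e.g. ipr_store_be32(&sense_buf[12], failing_lba) */
	memcpy(buf, &tmp, sizeof(tmp));
}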
4090 * ipr_get_autosense - Copy autosense data to sense buffer
4091 * @ipr_cmd: ipr command struct
4093 * This function copies the autosense buffer to the buffer
4094 * in the scsi_cmd, if there is autosense available.
4096 * Return value:
4097 * 1 if autosense was available / 0 if not
4099 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4101 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4103 if ((be32_to_cpu(ioasa->ioasc_specific) &
4104 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4105 return 0;
4107 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4108 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4109 SCSI_SENSE_BUFFERSIZE));
4110 return 1;
4114 * ipr_erp_start - Process an error response for a SCSI op
4115 * @ioa_cfg: ioa config struct
4116 * @ipr_cmd: ipr command struct
4118 * This function determines whether or not to initiate ERP
4119 * on the affected device.
4121 * Return value:
4122 * nothing
4124 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4125 struct ipr_cmnd *ipr_cmd)
4127 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4128 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4129 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4131 if (!res) {
4132 ipr_scsi_eh_done(ipr_cmd);
4133 return;
4136 if (ipr_is_gscsi(res))
4137 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4138 else
4139 ipr_gen_sense(ipr_cmd);
4141 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4142 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4143 if (ipr_is_naca_model(res))
4144 scsi_cmd->result |= (DID_ABORT << 16);
4145 else
4146 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4147 break;
4148 case IPR_IOASC_IR_RESOURCE_HANDLE:
4149 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4150 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4151 break;
4152 case IPR_IOASC_HW_SEL_TIMEOUT:
4153 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4154 if (!ipr_is_naca_model(res))
4155 res->needs_sync_complete = 1;
4156 break;
4157 case IPR_IOASC_SYNC_REQUIRED:
4158 if (!res->in_erp)
4159 res->needs_sync_complete = 1;
4160 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4161 break;
4162 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4163 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4164 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4165 break;
4166 case IPR_IOASC_BUS_WAS_RESET:
4167 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4169 * Report the bus reset and ask for a retry. The device
4170 * will give CC/UA the next command.
4172 if (!res->resetting_device)
4173 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4174 scsi_cmd->result |= (DID_ERROR << 16);
4175 if (!ipr_is_naca_model(res))
4176 res->needs_sync_complete = 1;
4177 break;
4178 case IPR_IOASC_HW_DEV_BUS_STATUS:
4179 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4180 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4181 if (!ipr_get_autosense(ipr_cmd)) {
4182 if (!ipr_is_naca_model(res)) {
4183 ipr_erp_cancel_all(ipr_cmd);
4184 return;
4188 if (!ipr_is_naca_model(res))
4189 res->needs_sync_complete = 1;
4190 break;
4191 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4192 break;
4193 default:
4194 scsi_cmd->result |= (DID_ERROR << 16);
4195 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4196 res->needs_sync_complete = 1;
4197 break;
4200 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4201 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4202 scsi_cmd->scsi_done(scsi_cmd);
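/*
 * For reference, scsi_cmd->result packs four bytes:
 * (driver_byte << 24) | (host_byte << 16) | (msg_byte << 8) | status_byte.
 * That is why the switch above ORs the DID_* host codes in at bit 16,
 * while IPR_IOASC_SENSE_STATUS() lands in the low status byte, e.g.:
 *
 *	scsi_cmd->result = (DID_ERROR << 16);		/+ host byte only +/
 *	scsi_cmd->result = SAM_STAT_CHECK_CONDITION;	/+ status byte only +/
 */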
4206 * ipr_scsi_done - mid-layer done function
4207 * @ipr_cmd: ipr command struct
4209 * This function is invoked by the interrupt handler for
4210 * ops generated by the SCSI mid-layer
4212 * Return value:
4213 * none
4215 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4218 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4219 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4221 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4223 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4224 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4226 scsi_cmd->scsi_done(scsi_cmd);
4227 } else
4228 ipr_erp_start(ioa_cfg, ipr_cmd);
4232 * ipr_queuecommand - Queue a mid-layer request
4233 * @scsi_cmd: scsi command struct
4234 * @done: done function
4236 * This function queues a request generated by the mid-layer.
4238 * Return value:
4239 * 0 on success
4240 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4241 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4243 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4244 void (*done) (struct scsi_cmnd *))
4246 struct ipr_ioa_cfg *ioa_cfg;
4247 struct ipr_resource_entry *res;
4248 struct ipr_ioarcb *ioarcb;
4249 struct ipr_cmnd *ipr_cmd;
4250 int rc = 0;
4252 scsi_cmd->scsi_done = done;
4253 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4254 res = scsi_cmd->device->hostdata;
4255 scsi_cmd->result = (DID_OK << 16);
4258 * We are currently blocking all devices due to a host reset.
4259 * We have told the host to stop giving us new requests, but
4260 * ERP ops don't count. FIXME
4262 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4263 return SCSI_MLQUEUE_HOST_BUSY;
4266 * FIXME - Create scsi_set_host_offline interface
4267 * and the ioa_is_dead check can be removed
4269 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4270 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4271 scsi_cmd->result = (DID_NO_CONNECT << 16);
4272 scsi_cmd->scsi_done(scsi_cmd);
4273 return 0;
4276 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4277 ioarcb = &ipr_cmd->ioarcb;
4278 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4280 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4281 ipr_cmd->scsi_cmd = scsi_cmd;
4282 ioarcb->res_handle = res->cfgte.res_handle;
4283 ipr_cmd->done = ipr_scsi_done;
4284 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4286 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4287 if (scsi_cmd->underflow == 0)
4288 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4290 if (res->needs_sync_complete) {
4291 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4292 res->needs_sync_complete = 0;
4295 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4296 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4297 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4298 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4301 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4302 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4303 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4305 if (likely(rc == 0))
4306 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4308 if (likely(rc == 0)) {
4309 mb();
4310 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4311 ioa_cfg->regs.ioarrin_reg);
4312 } else {
4313 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4314 return SCSI_MLQUEUE_HOST_BUSY;
4317 return 0;
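/*
 * The mb() before the IOARRIN write above is the generic "publish,
 * then ring the doorbell" pattern: the barrier makes the IOARCB memory
 * writes visible before the adapter is told to fetch the block. The
 * pattern, distilled into a hypothetical helper:
 */
static inline void ipr_post_ioarcb(void __iomem *ioarrin, u32 ioarcb_addr)
{
	mb();		/* order IOARCB writes before the doorbell write */
	writel(ioarcb_addr, ioarrin);
}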
4321 * ipr_ioa_info - Get information about the card/driver
4322 * @host: scsi host struct
4324 * Return value:
4325 * pointer to buffer with description string
4327 static const char * ipr_ioa_info(struct Scsi_Host *host)
4329 static char buffer[512];
4330 struct ipr_ioa_cfg *ioa_cfg;
4331 unsigned long lock_flags = 0;
4333 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4335 spin_lock_irqsave(host->host_lock, lock_flags);
4336 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4337 spin_unlock_irqrestore(host->host_lock, lock_flags);
4339 return buffer;
4342 static struct scsi_host_template driver_template = {
4343 .module = THIS_MODULE,
4344 .name = "IPR",
4345 .info = ipr_ioa_info,
4346 .queuecommand = ipr_queuecommand,
4347 .eh_abort_handler = ipr_eh_abort,
4348 .eh_device_reset_handler = ipr_eh_dev_reset,
4349 .eh_host_reset_handler = ipr_eh_host_reset,
4350 .slave_alloc = ipr_slave_alloc,
4351 .slave_configure = ipr_slave_configure,
4352 .slave_destroy = ipr_slave_destroy,
4353 .change_queue_depth = ipr_change_queue_depth,
4354 .change_queue_type = ipr_change_queue_type,
4355 .bios_param = ipr_biosparam,
4356 .can_queue = IPR_MAX_COMMANDS,
4357 .this_id = -1,
4358 .sg_tablesize = IPR_MAX_SGLIST,
4359 .max_sectors = IPR_IOA_MAX_SECTORS,
4360 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4361 .use_clustering = ENABLE_CLUSTERING,
4362 .shost_attrs = ipr_ioa_attrs,
4363 .sdev_attrs = ipr_dev_attrs,
4364 .proc_name = IPR_NAME
4367 #ifdef CONFIG_PPC_PSERIES
4368 static const u16 ipr_blocked_processors[] = {
4369 PV_NORTHSTAR,
4370 PV_PULSAR,
4371 PV_POWER4,
4372 PV_ICESTAR,
4373 PV_SSTAR,
4374 PV_POWER4p,
4375 PV_630,
4376 PV_630p
4380 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4381 * @ioa_cfg: ioa cfg struct
4383 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4384 * certain pSeries hardware. This function determines if the given
4385 * adapter is in one of these configurations or not.
4387 * Return value:
4388 * 1 if adapter is not supported / 0 if adapter is supported
4390 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4392 u8 rev_id;
4393 int i;
4395 if (ioa_cfg->type == 0x5702) {
4396 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4397 &rev_id) == PCIBIOS_SUCCESSFUL) {
4398 if (rev_id < 4) {
4399 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
4400 if (__is_processor(ipr_blocked_processors[i]))
4401 return 1;
4406 return 0;
4408 #else
4409 #define ipr_invalid_adapter(ioa_cfg) 0
4410 #endif
4413 * ipr_ioa_bringdown_done - IOA bring down completion.
4414 * @ipr_cmd: ipr command struct
4416 * This function processes the completion of an adapter bring down.
4417 * It wakes any reset sleepers.
4419 * Return value:
4420 * IPR_RC_JOB_RETURN
4422 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4426 ENTER;
4427 ioa_cfg->in_reset_reload = 0;
4428 ioa_cfg->reset_retries = 0;
4429 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4430 wake_up_all(&ioa_cfg->reset_wait_q);
4432 spin_unlock_irq(ioa_cfg->host->host_lock);
4433 scsi_unblock_requests(ioa_cfg->host);
4434 spin_lock_irq(ioa_cfg->host->host_lock);
4435 LEAVE;
4437 return IPR_RC_JOB_RETURN;
4441 * ipr_ioa_reset_done - IOA reset completion.
4442 * @ipr_cmd: ipr command struct
4444 * This function processes the completion of an adapter reset.
4445 * It schedules any necessary mid-layer add/removes and
4446 * wakes any reset sleepers.
4448 * Return value:
4449 * IPR_RC_JOB_RETURN
4451 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4454 struct ipr_resource_entry *res;
4455 struct ipr_hostrcb *hostrcb, *temp;
4456 int i = 0;
4458 ENTER;
4459 ioa_cfg->in_reset_reload = 0;
4460 ioa_cfg->allow_cmds = 1;
4461 ioa_cfg->reset_cmd = NULL;
4462 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4464 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4465 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4466 ipr_trace;
4467 break;
4470 schedule_work(&ioa_cfg->work_q);
4472 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4473 list_del(&hostrcb->queue);
4474 if (i++ < IPR_NUM_LOG_HCAMS)
4475 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4476 else
4477 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4480 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4482 ioa_cfg->reset_retries = 0;
4483 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4484 wake_up_all(&ioa_cfg->reset_wait_q);
4486 spin_unlock_irq(ioa_cfg->host->host_lock);
4487 scsi_unblock_requests(ioa_cfg->host);
4488 spin_lock_irq(ioa_cfg->host->host_lock);
4490 if (!ioa_cfg->allow_cmds)
4491 scsi_block_requests(ioa_cfg->host);
4493 LEAVE;
4494 return IPR_RC_JOB_RETURN;
4498 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4499 * @supported_dev: supported device struct
4500 * @vpids: vendor product id struct
4502 * Return value:
4503 * none
4505 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4506 struct ipr_std_inq_vpids *vpids)
4508 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4509 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4510 supported_dev->num_records = 1;
4511 supported_dev->data_length =
4512 cpu_to_be16(sizeof(struct ipr_supported_device));
4513 supported_dev->reserved = 0;
4517 * ipr_set_supported_devs - Send Set Supported Devices for a device
4518 * @ipr_cmd: ipr command struct
4520 * This function sends a Set Supported Devices command to the adapter.
4522 * Return value:
4523 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4525 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4527 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4528 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4529 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4530 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4531 struct ipr_resource_entry *res = ipr_cmd->u.res;
4533 ipr_cmd->job_step = ipr_ioa_reset_done;
4535 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4536 if (!ipr_is_scsi_disk(res))
4537 continue;
4539 ipr_cmd->u.res = res;
4540 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4542 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4543 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4544 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4546 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4547 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4548 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4550 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4551 sizeof(struct ipr_supported_device));
4552 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4553 offsetof(struct ipr_misc_cbs, supp_dev));
4554 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4555 ioarcb->write_data_transfer_length =
4556 cpu_to_be32(sizeof(struct ipr_supported_device));
4558 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4559 IPR_SET_SUP_DEVICE_TIMEOUT);
4561 ipr_cmd->job_step = ipr_set_supported_devs;
4562 return IPR_RC_JOB_RETURN;
4565 return IPR_RC_JOB_CONTINUE;
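/*
 * ipr_set_supported_devs() resumes its list walk across job-step
 * invocations: the cursor is kept in ipr_cmd->u.res and
 * list_for_each_entry_continue() picks up right after it on re-entry.
 * The idiom, reduced to a sketch (hypothetical types and names):
 */
struct ex_entry {
	struct list_head queue;
	int wants_cmd;
};

/* Handle one entry per call; returns the new cursor, or NULL when done */
static struct ex_entry *ex_process_next(struct list_head *head,
					struct ex_entry *cursor)
{
	list_for_each_entry_continue(cursor, head, queue) {
		if (!cursor->wants_cmd)
			continue;
		/* ...issue the async command, re-enter later with cursor... */
		return cursor;
	}
	return NULL;
}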
4569 * ipr_setup_write_cache - Disable write cache if needed
4570 * @ipr_cmd: ipr command struct
4572 * This function sets the adapter's write cache to the desired setting.
4574 * Return value:
4575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4577 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4581 ipr_cmd->job_step = ipr_set_supported_devs;
4582 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4583 struct ipr_resource_entry, queue);
4585 if (ioa_cfg->cache_state != CACHE_DISABLED)
4586 return IPR_RC_JOB_CONTINUE;
4588 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4589 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4590 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4591 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4593 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4595 return IPR_RC_JOB_RETURN;
4599 * ipr_get_mode_page - Locate specified mode page
4600 * @mode_pages: mode page buffer
4601 * @page_code: page code to find
4602 * @len: minimum required length for mode page
4604 * Return value:
4605 * pointer to mode page / NULL on failure
4607 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4608 u32 page_code, u32 len)
4610 struct ipr_mode_page_hdr *mode_hdr;
4611 u32 page_length;
4612 u32 length;
4614 if (!mode_pages || (mode_pages->hdr.length == 0))
4615 return NULL;
4617 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4618 mode_hdr = (struct ipr_mode_page_hdr *)
4619 (mode_pages->data + mode_pages->hdr.block_desc_len);
4621 while (length) {
4622 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4623 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4624 return mode_hdr;
4625 break;
4626 } else {
4627 page_length = (sizeof(struct ipr_mode_page_hdr) +
4628 mode_hdr->page_length);
4629 length -= page_length;
4630 mode_hdr = (struct ipr_mode_page_hdr *)
4631 ((unsigned long)mode_hdr + page_length);
4634 return NULL;
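/*
 * ipr_get_mode_page() can return NULL (missing buffer or page not
 * present); the page 28 callers below rely on the adapter always
 * reporting that page. A defensive caller would follow this sketch:
 *
 *	mode_page = ipr_get_mode_page(mode_pages, 0x28, sizeof(*mode_page));
 *	if (!mode_page)
 *		return;
 */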
4638 * ipr_check_term_power - Check for term power errors
4639 * @ioa_cfg: ioa config struct
4640 * @mode_pages: IOAFP mode pages buffer
4642 * Check the IOAFP's mode page 28 for term power errors
4644 * Return value:
4645 * nothing
4647 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4648 struct ipr_mode_pages *mode_pages)
4650 int i;
4651 int entry_length;
4652 struct ipr_dev_bus_entry *bus;
4653 struct ipr_mode_page28 *mode_page;
4655 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4656 sizeof(struct ipr_mode_page28));
4658 entry_length = mode_page->entry_length;
4660 bus = mode_page->bus;
4662 for (i = 0; i < mode_page->num_entries; i++) {
4663 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4664 dev_err(&ioa_cfg->pdev->dev,
4665 "Term power is absent on scsi bus %d\n",
4666 bus->res_addr.bus);
4669 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4674 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4675 * @ioa_cfg: ioa config struct
4677 * Looks through the config table checking for SES devices. If
4678 * an SES device appears in the SES table with a maximum SCSI
4679 * bus speed, the speed of that bus is limited accordingly.
4681 * Return value:
4682 * none
4684 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4686 u32 max_xfer_rate;
4687 int i;
4689 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4690 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4691 ioa_cfg->bus_attr[i].bus_width);
4693 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4694 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4699 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4700 * @ioa_cfg: ioa config struct
4701 * @mode_pages: mode page 28 buffer
4703 * Updates mode page 28 based on driver configuration
4705 * Return value:
4706 * none
4708 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4709 struct ipr_mode_pages *mode_pages)
4711 int i, entry_length;
4712 struct ipr_dev_bus_entry *bus;
4713 struct ipr_bus_attributes *bus_attr;
4714 struct ipr_mode_page28 *mode_page;
4716 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4717 sizeof(struct ipr_mode_page28));
4719 entry_length = mode_page->entry_length;
4721 /* Loop for each device bus entry */
4722 for (i = 0, bus = mode_page->bus;
4723 i < mode_page->num_entries;
4724 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4725 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4726 dev_err(&ioa_cfg->pdev->dev,
4727 "Invalid resource address reported: 0x%08X\n",
4728 IPR_GET_PHYS_LOC(bus->res_addr));
4729 continue;
4732 bus_attr = &ioa_cfg->bus_attr[i];
4733 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4734 bus->bus_width = bus_attr->bus_width;
4735 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4736 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4737 if (bus_attr->qas_enabled)
4738 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4739 else
4740 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4745 * ipr_build_mode_select - Build a mode select command
4746 * @ipr_cmd: ipr command struct
4747 * @res_handle: resource handle to send command to
4748 * @parm: Byte 1 of Mode Select command
4749 * @dma_addr: DMA buffer address
4750 * @xfer_len: data transfer length
4752 * Return value:
4753 * none
4755 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4756 __be32 res_handle, u8 parm, u32 dma_addr,
4757 u8 xfer_len)
4759 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4760 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4762 ioarcb->res_handle = res_handle;
4763 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4764 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4765 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4766 ioarcb->cmd_pkt.cdb[1] = parm;
4767 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4769 ioadl->flags_and_data_len =
4770 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4771 ioadl->address = cpu_to_be32(dma_addr);
4772 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4773 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4777 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4778 * @ipr_cmd: ipr command struct
4780 * This function sets up the SCSI bus attributes and sends
4781 * a Mode Select for Page 28 to activate them.
4783 * Return value:
4784 * IPR_RC_JOB_RETURN
4786 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4789 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4790 int length;
4792 ENTER;
4793 ipr_scsi_bus_speed_limit(ioa_cfg);
4794 ipr_check_term_power(ioa_cfg, mode_pages);
4795 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4796 length = mode_pages->hdr.length + 1;
4797 mode_pages->hdr.length = 0;
4799 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4800 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4801 length);
4803 ipr_cmd->job_step = ipr_setup_write_cache;
4804 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4806 LEAVE;
4807 return IPR_RC_JOB_RETURN;
4811 * ipr_build_mode_sense - Builds a mode sense command
4812 * @ipr_cmd: ipr command struct
4813 * @res_handle: resource handle to send command to
4814 * @parm: Byte 2 of mode sense command
4815 * @dma_addr: DMA address of mode sense buffer
4816 * @xfer_len: Size of DMA buffer
4818 * Return value:
4819 * none
4821 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4822 __be32 res_handle,
4823 u8 parm, u32 dma_addr, u8 xfer_len)
4825 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4826 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4828 ioarcb->res_handle = res_handle;
4829 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4830 ioarcb->cmd_pkt.cdb[2] = parm;
4831 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4832 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4834 ioadl->flags_and_data_len =
4835 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4836 ioadl->address = cpu_to_be32(dma_addr);
4837 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4838 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4842 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4843 * @ipr_cmd: ipr command struct
4845 * This function handles the failure of an IOA bringup command.
4847 * Return value:
4848 * IPR_RC_JOB_RETURN
4850 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4852 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4853 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4855 dev_err(&ioa_cfg->pdev->dev,
4856 "0x%02X failed with IOASC: 0x%08X\n",
4857 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4859 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4860 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4861 return IPR_RC_JOB_RETURN;
4865 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4866 * @ipr_cmd: ipr command struct
4868 * This function handles the failure of a Mode Sense to the IOAFP.
4869 * Some adapters do not handle all mode pages.
4871 * Return value:
4872 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4874 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4876 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4878 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4879 ipr_cmd->job_step = ipr_setup_write_cache;
4880 return IPR_RC_JOB_CONTINUE;
4883 return ipr_reset_cmd_failed(ipr_cmd);
4887 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4888 * @ipr_cmd: ipr command struct
4890 * This function sends a Page 28 mode sense to the IOA to
4891 * retrieve SCSI bus attributes.
4893 * Return value:
4894 * IPR_RC_JOB_RETURN
4896 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4900 ENTER;
4901 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4902 0x28, ioa_cfg->vpd_cbs_dma +
4903 offsetof(struct ipr_misc_cbs, mode_pages),
4904 sizeof(struct ipr_mode_pages));
4906 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4907 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4909 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4911 LEAVE;
4912 return IPR_RC_JOB_RETURN;
4916 * ipr_init_res_table - Initialize the resource table
4917 * @ipr_cmd: ipr command struct
4919 * This function looks through the existing resource table, comparing
4920 * it with the config table. This function will take care of old/new
4921 * devices and schedule adding/removing them from the mid-layer
4922 * as appropriate.
4924 * Return value:
4925 * IPR_RC_JOB_CONTINUE
4927 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4930 struct ipr_resource_entry *res, *temp;
4931 struct ipr_config_table_entry *cfgte;
4932 int found, i;
4933 LIST_HEAD(old_res);
4935 ENTER;
4936 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4937 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4939 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4940 list_move_tail(&res->queue, &old_res);
4942 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4943 cfgte = &ioa_cfg->cfg_table->dev[i];
4944 found = 0;
4946 list_for_each_entry_safe(res, temp, &old_res, queue) {
4947 if (!memcmp(&res->cfgte.res_addr,
4948 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4949 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4950 found = 1;
4951 break;
4955 if (!found) {
4956 if (list_empty(&ioa_cfg->free_res_q)) {
4957 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4958 break;
4961 found = 1;
4962 res = list_entry(ioa_cfg->free_res_q.next,
4963 struct ipr_resource_entry, queue);
4964 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4965 ipr_init_res_entry(res);
4966 res->add_to_ml = 1;
4969 if (found)
4970 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4973 list_for_each_entry_safe(res, temp, &old_res, queue) {
4974 if (res->sdev) {
4975 res->del_from_ml = 1;
4976 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
4977 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4978 } else {
4979 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4983 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4985 LEAVE;
4986 return IPR_RC_JOB_CONTINUE;
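/*
 * The reconciliation above is the classic two-list diff: move all live
 * entries onto a scratch list, pull back those still present in the new
 * config table, and whatever remains on the scratch list is stale. The
 * core move-back step, distilled (hypothetical types and names):
 */
struct ex_res {
	int key;
	struct list_head queue;
};

/* Move the entry matching key from old back to live; returns 1 if found */
static int ex_reclaim(struct list_head *old, struct list_head *live, int key)
{
	struct ex_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, old, queue) {
		if (res->key == key) {
			list_move_tail(&res->queue, live);
			return 1;
		}
	}
	return 0;
}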
4990 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4991 * @ipr_cmd: ipr command struct
4993 * This function sends a Query IOA Configuration command
4994 * to the adapter to retrieve the IOA configuration table.
4996 * Return value:
4997 * IPR_RC_JOB_RETURN
4999 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5002 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5003 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5004 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5006 ENTER;
5007 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5008 ucode_vpd->major_release, ucode_vpd->card_type,
5009 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5010 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5011 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5013 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5014 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5015 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5017 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5018 ioarcb->read_data_transfer_length =
5019 cpu_to_be32(sizeof(struct ipr_config_table));
5021 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5022 ioadl->flags_and_data_len =
5023 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5025 ipr_cmd->job_step = ipr_init_res_table;
5027 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5029 LEAVE;
5030 return IPR_RC_JOB_RETURN;
5034 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5035 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: transfer length
5037 * This utility function sends an inquiry to the adapter.
5039 * Return value:
5040 * none
5042 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5043 u32 dma_addr, u8 xfer_len)
5045 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5046 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5048 ENTER;
5049 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5050 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5052 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5053 ioarcb->cmd_pkt.cdb[1] = flags;
5054 ioarcb->cmd_pkt.cdb[2] = page;
5055 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5057 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5058 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5060 ioadl->address = cpu_to_be32(dma_addr);
5061 ioadl->flags_and_data_len =
5062 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5064 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5065 LEAVE;
5069 * ipr_inquiry_page_supported - Is the given inquiry page supported
5070 * @page0: inquiry page 0 buffer
5071 * @page: page code.
5073 * This function determines if the specified inquiry page is supported.
5075 * Return value:
5076 * 1 if page is supported / 0 if not
5078 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5080 int i;
5082 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5083 if (page0->page[i] == page)
5084 return 1;
5086 return 0;
5090 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5091 * @ipr_cmd: ipr command struct
5093 * This function sends a Page 3 inquiry to the adapter
5094 * to retrieve software VPD information.
5096 * Return value:
5097 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5099 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5102 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5104 ENTER;
5106 if (!ipr_inquiry_page_supported(page0, 1))
5107 ioa_cfg->cache_state = CACHE_NONE;
5109 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5111 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5112 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5113 sizeof(struct ipr_inquiry_page3));
5115 LEAVE;
5116 return IPR_RC_JOB_RETURN;
5120 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5121 * @ipr_cmd: ipr command struct
5123 * This function sends a Page 0 inquiry to the adapter
5124 * to retrieve supported inquiry pages.
5126 * Return value:
5127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5129 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5132 char type[5];
5134 ENTER;
5136 /* Grab the type out of the VPD and store it away */
5137 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5138 type[4] = '\0';
5139 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5141 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5143 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5145 sizeof(struct ipr_inquiry_page0));
5147 LEAVE;
5148 return IPR_RC_JOB_RETURN;
5152 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5153 * @ipr_cmd: ipr command struct
5155 * This function sends a standard inquiry to the adapter.
5157 * Return value:
5158 * IPR_RC_JOB_RETURN
5160 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5164 ENTER;
5165 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5167 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5168 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5169 sizeof(struct ipr_ioa_vpd));
5171 LEAVE;
5172 return IPR_RC_JOB_RETURN;
5176 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5177 * @ipr_cmd: ipr command struct
5179 * This function sends an Identify Host Request Response Queue
5180 * command to establish the HRRQ with the adapter.
5182 * Return value:
5183 * IPR_RC_JOB_RETURN
5185 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5188 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5190 ENTER;
5191 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5193 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5194 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5196 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5197 ioarcb->cmd_pkt.cdb[2] =
5198 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5199 ioarcb->cmd_pkt.cdb[3] =
5200 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5201 ioarcb->cmd_pkt.cdb[4] =
5202 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5203 ioarcb->cmd_pkt.cdb[5] =
5204 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5205 ioarcb->cmd_pkt.cdb[7] =
5206 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5207 ioarcb->cmd_pkt.cdb[8] =
5208 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5210 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5212 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5214 LEAVE;
5215 return IPR_RC_JOB_RETURN;
5219 * ipr_reset_timer_done - Adapter reset timer function
5220 * @ipr_cmd: ipr command struct
5222 * Description: This function is used in adapter reset processing
5223 * for timing events. If the reset_cmd pointer in the IOA
5224 * config struct is not this adapter's we are doing nested
5225 * resets and fail_all_ops will take care of freeing the
5226 * command block.
5228 * Return value:
5229 * none
5231 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5234 unsigned long lock_flags = 0;
5236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5238 if (ioa_cfg->reset_cmd == ipr_cmd) {
5239 list_del(&ipr_cmd->queue);
5240 ipr_cmd->done(ipr_cmd);
5243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5247 * ipr_reset_start_timer - Start a timer for adapter reset job
5248 * @ipr_cmd: ipr command struct
5249 * @timeout: timeout value
5251 * Description: This function is used in adapter reset processing
5252 * for timing events. If the reset_cmd pointer in the IOA
5253 * config struct is not this adapter's we are doing nested
5254 * resets and fail_all_ops will take care of freeing the
5255 * command block.
5257 * Return value:
5258 * none
5260 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5261 unsigned long timeout)
5263 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5264 ipr_cmd->done = ipr_reset_ioa_job;
5266 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5267 ipr_cmd->timer.expires = jiffies + timeout;
5268 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5269 add_timer(&ipr_cmd->timer);
5273 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5274 * @ioa_cfg: ioa cfg struct
5276 * Return value:
5277 * nothing
5279 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5281 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5283 /* Initialize Host RRQ pointers */
5284 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5285 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5286 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5287 ioa_cfg->toggle_bit = 1;
5289 /* Zero out config table */
5290 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
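/*
 * Host RRQ sketch: the adapter posts one 32-bit entry per completed op
 * into host_rrq[]; hrrq_curr is the consumer cursor, and toggle_bit
 * distinguishes fresh entries from stale ones after a wrap (the adapter
 * flips the bit it writes on every pass through the ring). The cursor
 * advance in the interrupt handler looks roughly like:
 *
 *	if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end)
 *		ioa_cfg->hrrq_curr++;
 *	else {
 *		ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *		ioa_cfg->toggle_bit ^= 1u;
 *	}
 */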
5294 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5295 * @ipr_cmd: ipr command struct
5297 * This function reinitializes some control blocks and
5298 * enables destructive diagnostics on the adapter.
5300 * Return value:
5301 * IPR_RC_JOB_RETURN
5303 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5305 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5306 volatile u32 int_reg;
5308 ENTER;
5309 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5310 ipr_init_ioa_mem(ioa_cfg);
5312 ioa_cfg->allow_interrupts = 1;
5313 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5315 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5316 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5317 ioa_cfg->regs.clr_interrupt_mask_reg);
5318 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5319 return IPR_RC_JOB_CONTINUE;
5322 /* Enable destructive diagnostics on IOA */
5323 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5325 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5326 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5328 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5330 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5331 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5332 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5333 ipr_cmd->done = ipr_reset_ioa_job;
5334 add_timer(&ipr_cmd->timer);
5335 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5337 LEAVE;
5338 return IPR_RC_JOB_RETURN;
5342 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5343 * @ipr_cmd: ipr command struct
5345 * This function is invoked when an adapter dump has run out
5346 * of processing time.
5348 * Return value:
5349 * IPR_RC_JOB_CONTINUE
5351 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5353 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5355 if (ioa_cfg->sdt_state == GET_DUMP)
5356 ioa_cfg->sdt_state = ABORT_DUMP;
5358 ipr_cmd->job_step = ipr_reset_alert;
5360 return IPR_RC_JOB_CONTINUE;
5364 * ipr_unit_check_no_data - Log a unit check/no data error log
5365 * @ioa_cfg: ioa config struct
5367 * Logs an error indicating the adapter unit checked, but for some
5368 * reason, we were unable to fetch the unit check buffer.
5370 * Return value:
5371 * nothing
5373 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5375 ioa_cfg->errors_logged++;
5376 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5380 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5381 * @ioa_cfg: ioa config struct
5383 * Fetches the unit check buffer from the adapter by clocking the data
5384 * through the mailbox register.
5386 * Return value:
5387 * nothing
5389 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5391 unsigned long mailbox;
5392 struct ipr_hostrcb *hostrcb;
5393 struct ipr_uc_sdt sdt;
5394 int rc, length;
5396 mailbox = readl(ioa_cfg->ioa_mailbox);
5398 if (!ipr_sdt_is_fmt2(mailbox)) {
5399 ipr_unit_check_no_data(ioa_cfg);
5400 return;
5403 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5404 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5405 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5407 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5408 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5409 ipr_unit_check_no_data(ioa_cfg);
5410 return;
5413 /* Find length of the first sdt entry (UC buffer) */
5414 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5415 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5417 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5418 struct ipr_hostrcb, queue);
5419 list_del(&hostrcb->queue);
5420 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5422 rc = ipr_get_ldump_data_section(ioa_cfg,
5423 be32_to_cpu(sdt.entry[0].bar_str_offset),
5424 (__be32 *)&hostrcb->hcam,
5425 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5427 if (!rc)
5428 ipr_handle_log_data(ioa_cfg, hostrcb);
5429 else
5430 ipr_unit_check_no_data(ioa_cfg);
5432 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5436 * ipr_reset_restore_cfg_space - Restore PCI config space.
5437 * @ipr_cmd: ipr command struct
5439 * Description: This function restores the saved PCI config space of
5440 * the adapter, fails all outstanding ops back to the callers, and
5441 * fetches the dump/unit check if applicable to this reset.
5443 * Return value:
5444 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5446 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5448 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5449 int rc;
5451 ENTER;
5452 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5453 rc = pci_restore_state(ioa_cfg->pdev);
5455 if (rc != PCIBIOS_SUCCESSFUL) {
5456 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5457 return IPR_RC_JOB_CONTINUE;
5460 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5461 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5462 return IPR_RC_JOB_CONTINUE;
5465 ipr_fail_all_ops(ioa_cfg);
5467 if (ioa_cfg->ioa_unit_checked) {
5468 ioa_cfg->ioa_unit_checked = 0;
5469 ipr_get_unit_check_buffer(ioa_cfg);
5470 ipr_cmd->job_step = ipr_reset_alert;
5471 ipr_reset_start_timer(ipr_cmd, 0);
5472 return IPR_RC_JOB_RETURN;
5475 if (ioa_cfg->in_ioa_bringdown) {
5476 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5477 } else {
5478 ipr_cmd->job_step = ipr_reset_enable_ioa;
5480 if (GET_DUMP == ioa_cfg->sdt_state) {
5481 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5482 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5483 schedule_work(&ioa_cfg->work_q);
5484 return IPR_RC_JOB_RETURN;
5488 LEAVE;
5489 return IPR_RC_JOB_CONTINUE;
5493 * ipr_reset_start_bist - Run BIST on the adapter.
5494 * @ipr_cmd: ipr command struct
5496 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5498 * Return value:
5499 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5501 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5503 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5504 int rc;
5506 ENTER;
5507 pci_block_user_cfg_access(ioa_cfg->pdev);
5508 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5510 if (rc != PCIBIOS_SUCCESSFUL) {
5511 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5512 rc = IPR_RC_JOB_CONTINUE;
5513 } else {
5514 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5515 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5516 rc = IPR_RC_JOB_RETURN;
5519 LEAVE;
5520 return rc;
5524 * ipr_reset_allowed - Query whether or not IOA can be reset
5525 * @ioa_cfg: ioa config struct
5527 * Return value:
5528 * 0 if reset not allowed / non-zero if reset is allowed
5530 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5532 volatile u32 temp_reg;
5534 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5535 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5539 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5540 * @ipr_cmd: ipr command struct
5542 * Description: This function waits for adapter permission to run BIST,
5543 * then runs BIST. If the adapter does not give permission after a
5544 * reasonable time, we will reset the adapter anyway. The impact of
5545 * resetting the adapter without warning the adapter is the risk of
5546 * losing the persistent error log on the adapter. If the adapter is
5547 * reset while it is writing to the flash on the adapter, the flash
5548 * segment will have bad ECC and be zeroed.
5550 * Return value:
5551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5553 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5556 int rc = IPR_RC_JOB_RETURN;
5558 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5559 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5560 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5561 } else {
5562 ipr_cmd->job_step = ipr_reset_start_bist;
5563 rc = IPR_RC_JOB_CONTINUE;
5566 return rc;
5570 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5571 * @ipr_cmd: ipr command struct
5573 * Description: This function alerts the adapter that it will be reset.
5574 * If memory space is not currently enabled, proceed directly
5575 * to running BIST on the adapter. The timer must always be started
5576 * so we guarantee we do not run BIST from ipr_isr.
5578 * Return value:
5579 * IPR_RC_JOB_RETURN
5581 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5584 u16 cmd_reg;
5585 int rc;
5587 ENTER;
5588 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5590 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5591 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5592 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5593 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5594 } else {
5595 ipr_cmd->job_step = ipr_reset_start_bist;
5598 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5599 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5601 LEAVE;
5602 return IPR_RC_JOB_RETURN;
5606 * ipr_reset_ucode_download_done - Microcode download completion
5607 * @ipr_cmd: ipr command struct
5609 * Description: This function unmaps the microcode download buffer.
5611 * Return value:
5612 * IPR_RC_JOB_CONTINUE
5614 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5617 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5619 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5620 sglist->num_sg, DMA_TO_DEVICE);
5622 ipr_cmd->job_step = ipr_reset_alert;
5623 return IPR_RC_JOB_CONTINUE;
5627 * ipr_reset_ucode_download - Download microcode to the adapter
5628 * @ipr_cmd: ipr command struct
5630 * Description: This function checks to see if there is microcode
5631 * to download to the adapter. If there is, a download is performed.
5633 * Return value:
5634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5636 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5639 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5641 ENTER;
5642 ipr_cmd->job_step = ipr_reset_alert;
5644 if (!sglist)
5645 return IPR_RC_JOB_CONTINUE;
5647 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5648 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5649 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5650 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5651 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5652 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5653 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5655 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5656 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5658 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5659 IPR_WRITE_BUFFER_TIMEOUT);
5661 LEAVE;
5662 return IPR_RC_JOB_RETURN;
5666 * ipr_reset_shutdown_ioa - Shutdown the adapter
5667 * @ipr_cmd: ipr command struct
5669 * Description: This function issues an adapter shutdown of the
5670 * specified type to the specified adapter as part of the
5671 * adapter reset job.
5673 * Return value:
5674 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5676 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5678 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5679 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5680 unsigned long timeout;
5681 int rc = IPR_RC_JOB_CONTINUE;
5683 ENTER;
5684 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5685 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5686 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5687 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5688 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5690 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5691 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5692 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5693 timeout = IPR_INTERNAL_TIMEOUT;
5694 else
5695 timeout = IPR_SHUTDOWN_TIMEOUT;
5697 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5699 rc = IPR_RC_JOB_RETURN;
5700 ipr_cmd->job_step = ipr_reset_ucode_download;
5701 } else
5702 ipr_cmd->job_step = ipr_reset_alert;
5704 LEAVE;
5705 return rc;
5709 * ipr_reset_ioa_job - Adapter reset job
5710 * @ipr_cmd: ipr command struct
5712 * Description: This function is the job router for the adapter reset job.
5714 * Return value:
5715 * none
5717 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5719 u32 rc, ioasc;
5720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5722 do {
5723 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5725 if (ioa_cfg->reset_cmd != ipr_cmd) {
5727 * We are doing nested adapter resets and this is
5728 * not the current reset job.
5730 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5731 return;
5734 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5735 rc = ipr_cmd->job_step_failed(ipr_cmd);
5736 if (rc == IPR_RC_JOB_RETURN)
5737 return;
5740 ipr_reinit_ipr_cmnd(ipr_cmd);
5741 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5742 rc = ipr_cmd->job_step(ipr_cmd);
5743 } while (rc == IPR_RC_JOB_CONTINUE);
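/*
 * The reset job is a coroutine-style state machine: a step returning
 * IPR_RC_JOB_CONTINUE finished synchronously and the router runs the
 * next step at once, while IPR_RC_JOB_RETURN means the step armed an
 * async completion that will re-enter ipr_reset_ioa_job() later. The
 * skeleton of the pattern (hypothetical names):
 */
struct ex_job {
	int (*step)(struct ex_job *);	/* each step advances ->step */
};

static void ex_job_router(struct ex_job *job)
{
	while (job->step(job) == IPR_RC_JOB_CONTINUE)
		;	/* synchronous steps chain; async steps return */
}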
5747 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5748 * @ioa_cfg: ioa config struct
5749 * @job_step: first job step of reset job
5750 * @shutdown_type: shutdown type
5752 * Description: This function will initiate the reset of the given adapter
5753 * starting at the selected job step.
5754 * If the caller needs to wait on the completion of the reset,
5755 * the caller must sleep on the reset_wait_q.
5757 * Return value:
5758 * none
5760 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5761 int (*job_step) (struct ipr_cmnd *),
5762 enum ipr_shutdown_type shutdown_type)
5764 struct ipr_cmnd *ipr_cmd;
5766 ioa_cfg->in_reset_reload = 1;
5767 ioa_cfg->allow_cmds = 0;
5768 scsi_block_requests(ioa_cfg->host);
5770 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5771 ioa_cfg->reset_cmd = ipr_cmd;
5772 ipr_cmd->job_step = job_step;
5773 ipr_cmd->u.shutdown_type = shutdown_type;
5775 ipr_reset_ioa_job(ipr_cmd);
5779 * ipr_initiate_ioa_reset - Initiate an adapter reset
5780 * @ioa_cfg: ioa config struct
5781 * @shutdown_type: shutdown type
5783 * Description: This function will initiate the reset of the given adapter.
5784 * If the caller needs to wait on the completion of the reset,
5785 * the caller must sleep on the reset_wait_q.
5787 * Return value:
5788 * none
5790 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5791 enum ipr_shutdown_type shutdown_type)
5793 if (ioa_cfg->ioa_is_dead)
5794 return;
5796 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5797 ioa_cfg->sdt_state = ABORT_DUMP;
5799 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5800 dev_err(&ioa_cfg->pdev->dev,
5801 "IOA taken offline - error recovery failed\n");
5803 ioa_cfg->reset_retries = 0;
5804 ioa_cfg->ioa_is_dead = 1;
5806 if (ioa_cfg->in_ioa_bringdown) {
5807 ioa_cfg->reset_cmd = NULL;
5808 ioa_cfg->in_reset_reload = 0;
5809 ipr_fail_all_ops(ioa_cfg);
5810 wake_up_all(&ioa_cfg->reset_wait_q);
5812 spin_unlock_irq(ioa_cfg->host->host_lock);
5813 scsi_unblock_requests(ioa_cfg->host);
5814 spin_lock_irq(ioa_cfg->host->host_lock);
5815 return;
5816 } else {
5817 ioa_cfg->in_ioa_bringdown = 1;
5818 shutdown_type = IPR_SHUTDOWN_NONE;
5822 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5823 shutdown_type);
5827 * ipr_reset_freeze - Hold off all I/O activity
5828 * @ipr_cmd: ipr command struct
5830 * Description: If the PCI slot is frozen, hold off all I/O
5831 * activity; then, as soon as the slot is available again,
5832 * initiate an adapter reset.
5834 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5836 /* Disallow new interrupts, avoid loop */
5837 ipr_cmd->ioa_cfg->allow_interrupts = 0;
5838 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5839 ipr_cmd->done = ipr_reset_ioa_job;
5840 return IPR_RC_JOB_RETURN;
5844 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5845 * @pdev: PCI device struct
5847 * Description: This routine is called to tell us that the PCI bus
5848 * is down. Can't do anything here, except put the device driver
5849 * into a holding pattern, waiting for the PCI bus to come back.
5851 static void ipr_pci_frozen(struct pci_dev *pdev)
5853 unsigned long flags = 0;
5854 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5856 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5857 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return value:
 *	PCI_ERS_RESULT_RECOVERED
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
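	/* Forcing the retry count to its cap sends ipr_initiate_ioa_reset()
	   down its "error recovery failed" path, marking the IOA dead */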
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
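	/* Can't sleep on reset_wait_q holding the host lock; the reset
	   job itself runs (and signals completion) under that lock */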
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;
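
	/*
	 * Carve out IPR_NUM_CMD_BLKS commands and precompute the bus
	 * addresses the adapter dereferences directly: the IOARCB itself,
	 * its scatter/gather list (IOADL), and its status area (IOASA).
	 */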
	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;
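
	/* Error unwind: release everything in reverse order of allocation */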
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);
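
	/*
	 * Turn the chip-specific register offsets into ready-to-use
	 * virtual addresses within the ioremapped MMIO window.
	 */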
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;
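
	/* ipr_pci_table entries can carry their chip config directly in
	   driver_data; fall back to a vendor/device search otherwise */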
	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;

	return NULL;
}
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}
	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
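
	/* Mask all interrupts and clear anything pending, preserving only
	   a pending "transition to operational" indication for the reset
	   job (see ipr_mask_and_clear_interrupts()) */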
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
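	/* The worker thread may still be adding or removing devices on
	   our behalf; let any queued work drain before tearing down */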
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
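	/* VSETs may have sparse LUNs with no LUN 0, and the IOA itself is
	   not a SAM device, so neither is found by the standard scan above */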
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
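	/* Block until the shutdown (including the write cache flush)
	   completes */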
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);