/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, and Obsidian */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
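
/*
 * Illustrative usage only (not from the original source): with the
 * parameters declared above, the driver could be loaded as, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=0
 *
 * Note that every module_param_named() above passes permission bits of 0,
 * so none of these parameters are visible or writable via sysfs after load.
 */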

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
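
/*
 * ipr_trc_hook() is used in start/finish pairs around each op: e.g.
 * ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0) when the op is sent and
 * ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc) when it is completed or
 * failed, so a command's life cycle can be reconstructed from the trace
 * buffer. With CONFIG_SCSI_IPR_TRACE off it compiles away to nothing.
 */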

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
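
/*
 * The readl() of sense_interrupt_reg above is intentionally discarded: the
 * read back through the volatile int_reg forces the preceding writel()s to
 * post to the adapter before the caller proceeds.
 */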

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:		ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:		timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
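
/*
 * Note the locking contract above: callers enter with host_lock held, and
 * the lock is dropped only around the wait_for_completion() sleep so the
 * interrupt path can run ipr_internal_cmd_done() and wake us.
 */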

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		/* Entries 0-9 live in array_member; the rest in array_member2 */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}
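
/*
 * For example, ipr_get_error(0x04448200) returns the index of the
 * "8150: Permanent IOA failure" entry above, while an IOASC with no table
 * entry yields index 0, the "unknown error" catch-all.
 */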

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			   "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset arrived while we were already resetting the
	   adapter and that reset failed, report the failure */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
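
/*
 * Worked example for the math above: a 16-bit wide bus capped by an
 * 80 MB/s enclosure entry gives (80 * 10) / (16 / 8) = 400, i.e. 40 MHz
 * in the 100KHz units this function returns.
 */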

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
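
/*
 * The "delay += delay" above doubles the poll interval on each miss, so
 * the wait backs off exponentially instead of hammering the register at a
 * fixed rate while the IOA prepares its acknowledge.
 */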

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
			readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
1868 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1869 * @ioa_cfg: ioa config struct
1870 * @dump: dump struct
1872 * Return value:
1873 * nothing
1875 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1877 unsigned long start_addr, sdt_word;
1878 unsigned long lock_flags = 0;
1879 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1880 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1881 u32 num_entries, start_off, end_off;
1882 u32 bytes_to_copy, bytes_copied, rc;
1883 struct ipr_sdt *sdt;
1884 int i;
1886 ENTER;
1888 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1890 if (ioa_cfg->sdt_state != GET_DUMP) {
1891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1892 return;
1895 start_addr = readl(ioa_cfg->ioa_mailbox);
1897 if (!ipr_sdt_is_fmt2(start_addr)) {
1898 dev_err(&ioa_cfg->pdev->dev,
1899 "Invalid dump table format: %lx\n", start_addr);
1900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1901 return;
1904 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1906 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1908 /* Initialize the overall dump header */
1909 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1910 driver_dump->hdr.num_entries = 1;
1911 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1912 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1913 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1914 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1916 ipr_dump_version_data(ioa_cfg, driver_dump);
1917 ipr_dump_location_data(ioa_cfg, driver_dump);
1918 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1919 ipr_dump_trace_data(ioa_cfg, driver_dump);
1921 /* Update dump_header */
1922 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1924 /* IOA Dump entry */
1925 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1926 ioa_dump->format = IPR_SDT_FMT2;
1927 ioa_dump->hdr.len = 0;
1928 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1929 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1931 /* First entries in sdt are actually a list of dump addresses and
1932 lengths to gather the real dump data. sdt represents the pointer
1933 to the ioa generated dump table. Dump data will be extracted based
1934 on entries in this table */
1935 sdt = &ioa_dump->sdt;
1937 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1938 sizeof(struct ipr_sdt) / sizeof(__be32));
1940 /* Smart Dump table is ready to use and the first entry is valid */
1941 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1942 dev_err(&ioa_cfg->pdev->dev,
1943 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1944 rc, be32_to_cpu(sdt->hdr.state));
1945 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1946 ioa_cfg->sdt_state = DUMP_OBTAINED;
1947 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1948 return;
1951 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1953 if (num_entries > IPR_NUM_SDT_ENTRIES)
1954 num_entries = IPR_NUM_SDT_ENTRIES;
1956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1958 for (i = 0; i < num_entries; i++) {
1959 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1960 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1961 break;
1964 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1965 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1966 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1967 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1969 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1970 bytes_to_copy = end_off - start_off;
1971 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1972 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1973 continue;
1976 /* Copy data from adapter to driver buffers */
1977 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1978 bytes_to_copy);
1980 ioa_dump->hdr.len += bytes_copied;
1982 if (bytes_copied != bytes_to_copy) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1984 break;
1990 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1992 /* Update dump_header */
1993 driver_dump->hdr.len += ioa_dump->hdr.len;
1994 wmb();
1995 ioa_cfg->sdt_state = DUMP_OBTAINED;
1996 LEAVE;
1999 #else
2000 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2001 #endif
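/*
 * Illustrative sketch, not part of the driver: the SDT walk above copies
 * each valid (start, end) extent, drops oversized entries, and downgrades
 * the dump to "qualified success" once a size ceiling is hit. The same
 * control flow on plain arrays; MAX_DUMP and struct sdt_like are
 * hypothetical stand-ins:
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_DUMP 4096u

struct sdt_like { uint32_t start, end, valid; };

int main(void)
{
	struct sdt_like entry[] = {
		{ 0, 1024, 1 }, { 0, 8192, 1 }, { 100, 612, 1 },
	};
	uint32_t total = 0;
	size_t i;

	for (i = 0; i < sizeof(entry) / sizeof(entry[0]); i++) {
		uint32_t len = entry[i].end - entry[i].start;

		if (total > MAX_DUMP)
			break;			/* qualified success */
		if (!entry[i].valid || len > MAX_DUMP)
			continue;		/* skip bad/oversized entries */
		total += len;			/* stands in for ipr_sdt_copy() */
	}
	printf("copied %u bytes\n", (unsigned)total);
	return 0;
}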
2004 * ipr_release_dump - Free adapter dump memory
2005 * @kref: kref struct
2007 * Return value:
2008 * nothing
2010 static void ipr_release_dump(struct kref *kref)
2012 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2013 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2014 unsigned long lock_flags = 0;
2015 int i;
2017 ENTER;
2018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2019 ioa_cfg->dump = NULL;
2020 ioa_cfg->sdt_state = INACTIVE;
2021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2023 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2024 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2026 kfree(dump);
2027 LEAVE;
2031 * ipr_worker_thread - Worker thread
2032 * @data: ioa config struct
2034 * Called at task level from a work thread. This function takes care
2035 of adding and removing devices from the mid-layer as configuration
2036 * changes are detected by the adapter.
2038 * Return value:
2039 * nothing
2041 static void ipr_worker_thread(void *data)
2043 unsigned long lock_flags;
2044 struct ipr_resource_entry *res;
2045 struct scsi_device *sdev;
2046 struct ipr_dump *dump;
2047 struct ipr_ioa_cfg *ioa_cfg = data;
2048 u8 bus, target, lun;
2049 int did_work;
2051 ENTER;
2052 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2054 if (ioa_cfg->sdt_state == GET_DUMP) {
2055 dump = ioa_cfg->dump;
2056 if (!dump) {
2057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2058 return;
2060 kref_get(&dump->kref);
2061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2062 ipr_get_ioa_dump(ioa_cfg, dump);
2063 kref_put(&dump->kref, ipr_release_dump);
2065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2066 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2067 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2069 return;
2072 restart:
2073 do {
2074 did_work = 0;
2075 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2077 return;
2080 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2081 if (res->del_from_ml && res->sdev) {
2082 did_work = 1;
2083 sdev = res->sdev;
2084 if (!scsi_device_get(sdev)) {
2085 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2086 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2087 scsi_remove_device(sdev);
2088 scsi_device_put(sdev);
2089 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2091 break;
2094 } while(did_work);
2096 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2097 if (res->add_to_ml) {
2098 bus = res->cfgte.res_addr.bus;
2099 target = res->cfgte.res_addr.target;
2100 lun = res->cfgte.res_addr.lun;
2101 res->add_to_ml = 0;
2102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2103 scsi_add_device(ioa_cfg->host, bus, target, lun);
2104 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2105 goto restart;
2109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2110 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2111 LEAVE;
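/*
 * Illustrative sketch, not part of the driver: the worker above relies
 * on the classic "drop the lock around a blocking call, retake it, then
 * restart the scan" idiom, because the resource list can change while
 * the lock is released. A userspace analogue using a pthread mutex;
 * blocking_add() is a hypothetical stand-in for scsi_add_device():
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;			/* stand-in for used_res_q */

static void blocking_add(void) { }	/* must not run under the lock */

int main(void)
{
	pthread_mutex_lock(&lock);
restart:
	while (pending) {
		pending--;
		pthread_mutex_unlock(&lock);	/* never block while locked */
		blocking_add();
		pthread_mutex_lock(&lock);
		goto restart;			/* list may have changed */
	}
	pthread_mutex_unlock(&lock);
	printf("worker done\n");
	return 0;
}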
2114 #ifdef CONFIG_SCSI_IPR_TRACE
2116 * ipr_read_trace - Dump the adapter trace
2117 * @kobj: kobject struct
2118 * @buf: buffer
2119 * @off: offset
2120 * @count: buffer size
2122 * Return value:
2123 * number of bytes read
2125 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2126 loff_t off, size_t count)
2128 struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2129 struct Scsi_Host *shost = class_to_shost(cdev);
2130 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2131 unsigned long lock_flags = 0;
2132 int size = IPR_TRACE_SIZE;
2133 char *src = (char *)ioa_cfg->trace;
2135 if (off > size)
2136 return 0;
2137 if (off + count > size) {
2138 size -= off;
2139 count = size;
2142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2143 memcpy(buf, &src[off], count);
2144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2145 return count;
2148 static struct bin_attribute ipr_trace_attr = {
2149 .attr = {
2150 .name = "trace",
2151 .mode = S_IRUGO,
2153 .size = 0,
2154 .read = ipr_read_trace,
2156 #endif
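/*
 * Usage sketch (userspace, not driver code): "trace" is a read-only
 * binary attribute on the Scsi_Host class device. A minimal reader;
 * the host0 path is an assumption and varies per system:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/trace", "rb");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("trace");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* raw trace to stdout */
	fclose(f);
	return 0;
}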
2158 static const struct {
2159 enum ipr_cache_state state;
2160 char *name;
2161 } cache_state [] = {
2162 { CACHE_NONE, "none" },
2163 { CACHE_DISABLED, "disabled" },
2164 { CACHE_ENABLED, "enabled" }
2168 * ipr_show_write_caching - Show the write caching attribute
2169 * @class_dev: class device struct
2170 * @buf: buffer
2172 * Return value:
2173 * number of bytes printed to buffer
2175 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2177 struct Scsi_Host *shost = class_to_shost(class_dev);
2178 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2179 unsigned long lock_flags = 0;
2180 int i, len = 0;
2182 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2183 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2184 if (cache_state[i].state == ioa_cfg->cache_state) {
2185 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2186 break;
2189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2190 return len;
2195 * ipr_store_write_caching - Enable/disable adapter write cache
2196 * @class_dev: class_device struct
2197 * @buf: buffer
2198 * @count: buffer size
2200 * This function will enable/disable adapter write cache.
2202 * Return value:
2203 * count on success / other on failure
2205 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2206 const char *buf, size_t count)
2208 struct Scsi_Host *shost = class_to_shost(class_dev);
2209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2210 unsigned long lock_flags = 0;
2211 enum ipr_cache_state new_state = CACHE_INVALID;
2212 int i;
2214 if (!capable(CAP_SYS_ADMIN))
2215 return -EACCES;
2216 if (ioa_cfg->cache_state == CACHE_NONE)
2217 return -EINVAL;
2219 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2220 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2221 new_state = cache_state[i].state;
2222 break;
2226 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2227 return -EINVAL;
2229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2230 if (ioa_cfg->cache_state == new_state) {
2231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2232 return count;
2235 ioa_cfg->cache_state = new_state;
2236 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2237 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2238 if (!ioa_cfg->in_reset_reload)
2239 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2241 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2243 return count;
2246 static struct class_device_attribute ipr_ioa_cache_attr = {
2247 .attr = {
2248 .name = "write_cache",
2249 .mode = S_IRUGO | S_IWUSR,
2251 .show = ipr_show_write_caching,
2252 .store = ipr_store_write_caching
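/*
 * Usage sketch (userspace, not driver code): writing one of the names
 * from the cache_state table into "write_cache" toggles the adapter
 * cache; the store routine kicks off a normal shutdown/reset, so the
 * write may block until the reload completes. Path is an assumption:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/write_cache", "w");

	if (!f) {
		perror("write_cache");
		return 1;
	}
	fputs("disabled\n", f);		/* or "enabled" */
	return fclose(f) ? 1 : 0;
}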
2256 * ipr_show_fw_version - Show the firmware version
2257 * @class_dev: class device struct
2258 * @buf: buffer
2260 * Return value:
2261 * number of bytes printed to buffer
2263 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2265 struct Scsi_Host *shost = class_to_shost(class_dev);
2266 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2267 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2268 unsigned long lock_flags = 0;
2269 int len;
2271 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2272 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2273 ucode_vpd->major_release, ucode_vpd->card_type,
2274 ucode_vpd->minor_release[0],
2275 ucode_vpd->minor_release[1]);
2276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2277 return len;
2280 static struct class_device_attribute ipr_fw_version_attr = {
2281 .attr = {
2282 .name = "fw_version",
2283 .mode = S_IRUGO,
2285 .show = ipr_show_fw_version,
2289 * ipr_show_log_level - Show the adapter's error logging level
2290 * @class_dev: class device struct
2291 * @buf: buffer
2293 * Return value:
2294 * number of bytes printed to buffer
2296 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2298 struct Scsi_Host *shost = class_to_shost(class_dev);
2299 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2300 unsigned long lock_flags = 0;
2301 int len;
2303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2304 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2306 return len;
2310 * ipr_store_log_level - Change the adapter's error logging level
2311 * @class_dev: class device struct
2312 * @buf: buffer
2313 * @count: buffer size
2314 * Return value:
2315 * number of bytes consumed from buffer
2317 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2318 const char *buf, size_t count)
2320 struct Scsi_Host *shost = class_to_shost(class_dev);
2321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2322 unsigned long lock_flags = 0;
2324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2325 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2326 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2327 return strlen(buf);
2330 static struct class_device_attribute ipr_log_level_attr = {
2331 .attr = {
2332 .name = "log_level",
2333 .mode = S_IRUGO | S_IWUSR,
2335 .show = ipr_show_log_level,
2336 .store = ipr_store_log_level
2340 * ipr_store_diagnostics - IOA Diagnostics interface
2341 * @class_dev: class_device struct
2342 * @buf: buffer
2343 * @count: buffer size
2345 * This function will reset the adapter and wait a reasonable
2346 * amount of time for any errors that the adapter might log.
2348 * Return value:
2349 * count on success / other on failure
2351 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2352 const char *buf, size_t count)
2354 struct Scsi_Host *shost = class_to_shost(class_dev);
2355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2356 unsigned long lock_flags = 0;
2357 int rc = count;
2359 if (!capable(CAP_SYS_ADMIN))
2360 return -EACCES;
2362 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2364 ioa_cfg->errors_logged = 0;
2365 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2367 if (ioa_cfg->in_reset_reload) {
2368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2369 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2371 /* Wait for a second for any errors to be logged */
2372 msleep(1000);
2373 } else {
2374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2375 return -EIO;
2378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2379 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2380 rc = -EIO;
2381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383 return rc;
2386 static struct class_device_attribute ipr_diagnostics_attr = {
2387 .attr = {
2388 .name = "run_diagnostics",
2389 .mode = S_IWUSR,
2391 .store = ipr_store_diagnostics
2395 * ipr_show_adapter_state - Show the adapter's state
2396 * @class_dev: class device struct
2397 * @buf: buffer
2399 * Return value:
2400 * number of bytes printed to buffer
2402 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2404 struct Scsi_Host *shost = class_to_shost(class_dev);
2405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2406 unsigned long lock_flags = 0;
2407 int len;
2409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2410 if (ioa_cfg->ioa_is_dead)
2411 len = snprintf(buf, PAGE_SIZE, "offline\n");
2412 else
2413 len = snprintf(buf, PAGE_SIZE, "online\n");
2414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2415 return len;
2419 * ipr_store_adapter_state - Change adapter state
2420 * @class_dev: class_device struct
2421 * @buf: buffer
2422 * @count: buffer size
2424 * This function will change the adapter's state.
2426 * Return value:
2427 * count on success / other on failure
2429 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2430 const char *buf, size_t count)
2432 struct Scsi_Host *shost = class_to_shost(class_dev);
2433 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2434 unsigned long lock_flags;
2435 int result = count;
2437 if (!capable(CAP_SYS_ADMIN))
2438 return -EACCES;
2440 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2441 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2442 ioa_cfg->ioa_is_dead = 0;
2443 ioa_cfg->reset_retries = 0;
2444 ioa_cfg->in_ioa_bringdown = 0;
2445 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2448 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2450 return result;
2453 static struct class_device_attribute ipr_ioa_state_attr = {
2454 .attr = {
2455 .name = "state",
2456 .mode = S_IRUGO | S_IWUSR,
2458 .show = ipr_show_adapter_state,
2459 .store = ipr_store_adapter_state
2463 * ipr_store_reset_adapter - Reset the adapter
2464 * @class_dev: class_device struct
2465 * @buf: buffer
2466 * @count: buffer size
2468 * This function will reset the adapter.
2470 * Return value:
2471 * count on success / other on failure
2473 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2474 const char *buf, size_t count)
2476 struct Scsi_Host *shost = class_to_shost(class_dev);
2477 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2478 unsigned long lock_flags;
2479 int result = count;
2481 if (!capable(CAP_SYS_ADMIN))
2482 return -EACCES;
2484 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2485 if (!ioa_cfg->in_reset_reload)
2486 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2488 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2490 return result;
2493 static struct class_device_attribute ipr_ioa_reset_attr = {
2494 .attr = {
2495 .name = "reset_host",
2496 .mode = S_IWUSR,
2498 .store = ipr_store_reset_adapter
2502 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2503 * @buf_len: buffer length
2505 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2506 * list to use for microcode download
2508 * Return value:
2509 * pointer to sglist / NULL on failure
2511 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2513 int sg_size, order, bsize_elem, num_elem, i, j;
2514 struct ipr_sglist *sglist;
2515 struct scatterlist *scatterlist;
2516 struct page *page;
2518 /* Get the minimum size per scatter/gather element */
2519 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2521 /* Get the actual size per element */
2522 order = get_order(sg_size);
2524 /* Determine the actual number of bytes per element */
2525 bsize_elem = PAGE_SIZE * (1 << order);
2527 /* Determine the actual number of sg entries needed */
2528 if (buf_len % bsize_elem)
2529 num_elem = (buf_len / bsize_elem) + 1;
2530 else
2531 num_elem = buf_len / bsize_elem;
2533 /* Allocate a scatter/gather list for the DMA */
2534 sglist = kzalloc(sizeof(struct ipr_sglist) +
2535 (sizeof(struct scatterlist) * (num_elem - 1)),
2536 GFP_KERNEL);
2538 if (sglist == NULL) {
2539 ipr_trace;
2540 return NULL;
2543 scatterlist = sglist->scatterlist;
2545 sglist->order = order;
2546 sglist->num_sg = num_elem;
2548 /* Allocate a bunch of sg elements */
2549 for (i = 0; i < num_elem; i++) {
2550 page = alloc_pages(GFP_KERNEL, order);
2551 if (!page) {
2552 ipr_trace;
2554 /* Free up what we already allocated */
2555 for (j = i - 1; j >= 0; j--)
2556 __free_pages(scatterlist[j].page, order);
2557 kfree(sglist);
2558 return NULL;
2561 scatterlist[i].page = page;
2564 return sglist;
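/*
 * Illustrative sketch, not part of the driver: the allocator above picks
 * the smallest power-of-two chunk that keeps the element count within
 * IPR_MAX_SGLIST. The same arithmetic standalone, assuming 4 KiB pages,
 * a 64-entry list limit, and a stand-in for get_order():
 */
#include <stdio.h>

#define PAGE_SZ		4096
#define MAX_SGLIST	64

static int order_for(int size)		/* stand-in for get_order() */
{
	int order = 0;

	while ((PAGE_SZ << order) < size)
		order++;
	return order;
}

int main(void)
{
	int buf_len = 1300000;		/* sample microcode image size */
	int sg_size = buf_len / (MAX_SGLIST - 1);
	int order = order_for(sg_size);
	int bsize_elem = PAGE_SZ << order;
	int num_elem = (buf_len + bsize_elem - 1) / bsize_elem;

	/* Prints: order=3 elem=32768 num=40 */
	printf("order=%d elem=%d num=%d\n", order, bsize_elem, num_elem);
	return 0;
}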
2568 * ipr_free_ucode_buffer - Frees a microcode download buffer
2569 * @sglist: scatter/gather list pointer
2571 * Free a DMA'able ucode download buffer previously allocated with
2572 * ipr_alloc_ucode_buffer
2574 * Return value:
2575 * nothing
2577 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2579 int i;
2581 for (i = 0; i < sglist->num_sg; i++)
2582 __free_pages(sglist->scatterlist[i].page, sglist->order);
2584 kfree(sglist);
2588 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2589 * @sglist: scatter/gather list pointer
2590 * @buffer: buffer pointer
2591 * @len: buffer length
2593 * Copy a microcode image from a user buffer into a buffer allocated by
2594 * ipr_alloc_ucode_buffer
2596 * Return value:
2597 * 0 on success / other on failure
2599 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2600 u8 *buffer, u32 len)
2602 int bsize_elem, i, result = 0;
2603 struct scatterlist *scatterlist;
2604 void *kaddr;
2606 /* Determine the actual number of bytes per element */
2607 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2609 scatterlist = sglist->scatterlist;
2611 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2612 kaddr = kmap(scatterlist[i].page);
2613 memcpy(kaddr, buffer, bsize_elem);
2614 kunmap(scatterlist[i].page);
2616 scatterlist[i].length = bsize_elem;
2618 if (result != 0) {
2619 ipr_trace;
2620 return result;
2624 if (len % bsize_elem) {
2625 kaddr = kmap(scatterlist[i].page);
2626 memcpy(kaddr, buffer, len % bsize_elem);
2627 kunmap(scatterlist[i].page);
2629 scatterlist[i].length = len % bsize_elem;
2632 sglist->buffer_len = len;
2633 return result;
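/*
 * Illustrative sketch, not part of the driver: the copy above moves whole
 * bsize_elem chunks first and then a short tail of len % bsize_elem
 * bytes. The same split over malloc'd stand-in pages:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK 8			/* stand-in for bsize_elem */

int main(void)
{
	const char src[] = "abcdefghijklmnopqrstu";	/* 21 bytes */
	size_t len = sizeof(src) - 1, nfull = len / CHUNK, i;
	size_t used = nfull + (len % CHUNK ? 1 : 0);
	char *pages[4];

	for (i = 0; i < nfull; i++) {			/* full chunks */
		pages[i] = malloc(CHUNK);
		memcpy(pages[i], src + i * CHUNK, CHUNK);
	}
	if (len % CHUNK) {				/* short tail */
		pages[i] = malloc(len % CHUNK);
		memcpy(pages[i], src + i * CHUNK, len % CHUNK);
	}
	printf("%zu full chunks + %zu tail bytes\n", nfull, len % CHUNK);
	for (i = 0; i < used; i++)
		free(pages[i]);
	return 0;
}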
2637 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2638 * @ipr_cmd: ipr command struct
2639 * @sglist: scatter/gather list
2641 * Builds a microcode download IOA data list (IOADL).
2644 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2645 struct ipr_sglist *sglist)
2647 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2648 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2649 struct scatterlist *scatterlist = sglist->scatterlist;
2650 int i;
2652 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2654 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2655 ioarcb->write_ioadl_len =
2656 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2658 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2659 ioadl[i].flags_and_data_len =
2660 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2661 ioadl[i].address =
2662 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2665 ioadl[i-1].flags_and_data_len |=
2666 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2670 * ipr_update_ioa_ucode - Update IOA's microcode
2671 * @ioa_cfg: ioa config struct
2672 * @sglist: scatter/gather list
2674 * Initiate an adapter reset to update the IOA's microcode
2676 * Return value:
2677 * 0 on success / -EIO on failure
2679 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2680 struct ipr_sglist *sglist)
2682 unsigned long lock_flags;
2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2686 if (ioa_cfg->ucode_sglist) {
2687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 dev_err(&ioa_cfg->pdev->dev,
2689 "Microcode download already in progress\n");
2690 return -EIO;
2693 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2694 sglist->num_sg, DMA_TO_DEVICE);
2696 if (!sglist->num_dma_sg) {
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698 dev_err(&ioa_cfg->pdev->dev,
2699 "Failed to map microcode download buffer!\n");
2700 return -EIO;
2703 ioa_cfg->ucode_sglist = sglist;
2704 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2705 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2706 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2708 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709 ioa_cfg->ucode_sglist = NULL;
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 return 0;
2715 * ipr_store_update_fw - Update the firmware on the adapter
2716 * @class_dev: class_device struct
2717 * @buf: buffer
2718 * @count: buffer size
2720 * This function will update the firmware on the adapter.
2722 * Return value:
2723 * count on success / other on failure
2725 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2726 const char *buf, size_t count)
2728 struct Scsi_Host *shost = class_to_shost(class_dev);
2729 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2730 struct ipr_ucode_image_header *image_hdr;
2731 const struct firmware *fw_entry;
2732 struct ipr_sglist *sglist;
2733 char fname[100];
2734 char *src;
2735 int len, result, dnld_size;
2737 if (!capable(CAP_SYS_ADMIN))
2738 return -EACCES;
2740 len = snprintf(fname, 99, "%s", buf);
2741 if (len > 0 && fname[len - 1] == '\n')
2742 fname[len - 1] = '\0';
2743 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2744 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2745 return -EIO;
2748 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2750 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2751 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2752 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2753 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2754 release_firmware(fw_entry);
2755 return -EINVAL;
2758 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2759 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2760 sglist = ipr_alloc_ucode_buffer(dnld_size);
2762 if (!sglist) {
2763 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2764 release_firmware(fw_entry);
2765 return -ENOMEM;
2768 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2770 if (result) {
2771 dev_err(&ioa_cfg->pdev->dev,
2772 "Microcode buffer copy to DMA buffer failed\n");
2773 goto out;
2776 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2778 if (!result)
2779 result = count;
2780 out:
2781 ipr_free_ucode_buffer(sglist);
2782 release_firmware(fw_entry);
2783 return result;
2786 static struct class_device_attribute ipr_update_fw_attr = {
2787 .attr = {
2788 .name = "update_fw",
2789 .mode = S_IWUSR,
2791 .store = ipr_store_update_fw
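/*
 * Usage sketch (userspace, not driver code): a microcode update is
 * triggered by writing an image file name to "update_fw"; the image must
 * be where request_firmware() looks (typically /lib/firmware). The write
 * blocks across the adapter reset. Path and file name are hypothetical:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/update_fw", "w");

	if (!f) {
		perror("update_fw");
		return 1;
	}
	fputs("ibm-ipr-ucode.bin\n", f);	/* made-up image name */
	return fclose(f) ? 1 : 0;
}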
2794 static struct class_device_attribute *ipr_ioa_attrs[] = {
2795 &ipr_fw_version_attr,
2796 &ipr_log_level_attr,
2797 &ipr_diagnostics_attr,
2798 &ipr_ioa_state_attr,
2799 &ipr_ioa_reset_attr,
2800 &ipr_update_fw_attr,
2801 &ipr_ioa_cache_attr,
2802 NULL,
2805 #ifdef CONFIG_SCSI_IPR_DUMP
2807 * ipr_read_dump - Dump the adapter
2808 * @kobj: kobject struct
2809 * @buf: buffer
2810 * @off: offset
2811 * @count: buffer size
2813 * Return value:
2814 * number of bytes read on success / other on failure
2816 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2817 loff_t off, size_t count)
2819 struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2820 struct Scsi_Host *shost = class_to_shost(cdev);
2821 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2822 struct ipr_dump *dump;
2823 unsigned long lock_flags = 0;
2824 char *src;
2825 int len;
2826 size_t rc = count;
2828 if (!capable(CAP_SYS_ADMIN))
2829 return -EACCES;
2831 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2832 dump = ioa_cfg->dump;
2834 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2835 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2836 return 0;
2838 kref_get(&dump->kref);
2839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2841 if (off > dump->driver_dump.hdr.len) {
2842 kref_put(&dump->kref, ipr_release_dump);
2843 return 0;
2846 if (off + count > dump->driver_dump.hdr.len) {
2847 count = dump->driver_dump.hdr.len - off;
2848 rc = count;
2851 if (count && off < sizeof(dump->driver_dump)) {
2852 if (off + count > sizeof(dump->driver_dump))
2853 len = sizeof(dump->driver_dump) - off;
2854 else
2855 len = count;
2856 src = (u8 *)&dump->driver_dump + off;
2857 memcpy(buf, src, len);
2858 buf += len;
2859 off += len;
2860 count -= len;
2863 off -= sizeof(dump->driver_dump);
2865 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2866 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2867 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2868 else
2869 len = count;
2870 src = (u8 *)&dump->ioa_dump + off;
2871 memcpy(buf, src, len);
2872 buf += len;
2873 off += len;
2874 count -= len;
2877 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2879 while (count) {
2880 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2881 len = PAGE_ALIGN(off) - off;
2882 else
2883 len = count;
2884 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2885 src += off & ~PAGE_MASK;
2886 memcpy(buf, src, len);
2887 buf += len;
2888 off += len;
2889 count -= len;
2892 kref_put(&dump->kref, ipr_release_dump);
2893 return rc;
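/*
 * Illustrative sketch, not part of the driver: ipr_read_dump presents
 * several back-to-back regions as one flat file, clipping (off, count)
 * against each region and rebasing the offset before the next. The same
 * windowing over two plain buffers; window_read() is hypothetical:
 */
#include <stdio.h>
#include <string.h>

static size_t window_read(char *dst, long off, size_t count,
			  const char *a, long alen,
			  const char *b, long blen)
{
	size_t done = 0;
	long len;

	if (count && off < alen) {		/* clip against region A */
		len = (off + (long)count > alen) ? alen - off : (long)count;
		memcpy(dst + done, a + off, len);
		off += len; count -= len; done += len;
	}
	off -= alen;				/* rebase into region B */
	if (count && off < blen) {
		len = (off + (long)count > blen) ? blen - off : (long)count;
		memcpy(dst + done, b + off, len);
		done += len;
	}
	return done;
}

int main(void)
{
	char out[16];
	size_t n = window_read(out, 3, 8, "HEADER", 6, "PAYLOAD", 7);

	out[n] = '\0';
	printf("%zu: %s\n", n, out);		/* 8: DERPAYLO */
	return 0;
}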
2897 * ipr_alloc_dump - Prepare for adapter dump
2898 * @ioa_cfg: ioa config struct
2900 * Return value:
2901 * 0 on success / other on failure
2903 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2905 struct ipr_dump *dump;
2906 unsigned long lock_flags = 0;
2908 ENTER;
2909 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2911 if (!dump) {
2912 ipr_err("Dump memory allocation failed\n");
2913 return -ENOMEM;
2916 kref_init(&dump->kref);
2917 dump->ioa_cfg = ioa_cfg;
2919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2921 if (INACTIVE != ioa_cfg->sdt_state) {
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2923 kfree(dump);
2924 return 0;
2927 ioa_cfg->dump = dump;
2928 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2929 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2930 ioa_cfg->dump_taken = 1;
2931 schedule_work(&ioa_cfg->work_q);
2933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2935 LEAVE;
2936 return 0;
2940 * ipr_free_dump - Free adapter dump memory
2941 * @ioa_cfg: ioa config struct
2943 * Return value:
2944 * 0 on success / other on failure
2946 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2948 struct ipr_dump *dump;
2949 unsigned long lock_flags = 0;
2951 ENTER;
2953 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2954 dump = ioa_cfg->dump;
2955 if (!dump) {
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2957 return 0;
2960 ioa_cfg->dump = NULL;
2961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2963 kref_put(&dump->kref, ipr_release_dump);
2965 LEAVE;
2966 return 0;
2970 * ipr_write_dump - Setup dump state of adapter
2971 * @kobj: kobject struct
2972 * @buf: buffer
2973 * @off: offset
2974 * @count: buffer size
2976 * Return value:
2977 * count on success / other on failure
2979 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2980 loff_t off, size_t count)
2982 struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2983 struct Scsi_Host *shost = class_to_shost(cdev);
2984 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2985 int rc;
2987 if (!capable(CAP_SYS_ADMIN))
2988 return -EACCES;
2990 if (buf[0] == '1')
2991 rc = ipr_alloc_dump(ioa_cfg);
2992 else if (buf[0] == '0')
2993 rc = ipr_free_dump(ioa_cfg);
2994 else
2995 return -EINVAL;
2997 if (rc)
2998 return rc;
2999 else
3000 return count;
3003 static struct bin_attribute ipr_dump_attr = {
3004 .attr = {
3005 .name = "dump",
3006 .mode = S_IRUSR | S_IWUSR,
3008 .size = 0,
3009 .read = ipr_read_dump,
3010 .write = ipr_write_dump
3012 #else
3013 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3014 #endif
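/*
 * Usage sketch (userspace, not driver code): the "dump" attribute is a
 * three-step interface: write '1' to arm collection (ipr_alloc_dump),
 * read the binary dump once the adapter has failed and been dumped, then
 * write '0' to release the memory (ipr_free_dump). Path is an assumption:
 */
#include <stdio.h>

static const char *path = "/sys/class/scsi_host/host0/dump";

int main(void)
{
	FILE *f = fopen(path, "w");
	char buf[4096];
	size_t n;

	if (!f)
		return 1;
	fputc('1', f);			/* arm dump collection */
	fclose(f);

	if ((f = fopen(path, "rb"))) {	/* later: fetch the dump */
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
	}

	if ((f = fopen(path, "w"))) {
		fputc('0', f);		/* free the dump buffers */
		fclose(f);
	}
	return 0;
}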
3017 * ipr_change_queue_depth - Change the device's queue depth
3018 * @sdev: scsi device struct
3019 * @qdepth: depth to set
3021 * Return value:
3022 * actual depth set
3024 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3026 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3027 return sdev->queue_depth;
3031 * ipr_change_queue_type - Change the device's queue type
3032 * @sdev: scsi device struct
3033 * @tag_type: type of tags to use
3035 * Return value:
3036 * actual queue type set
3038 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3040 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3041 struct ipr_resource_entry *res;
3042 unsigned long lock_flags = 0;
3044 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3045 res = (struct ipr_resource_entry *)sdev->hostdata;
3047 if (res) {
3048 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3050 * We don't bother quiescing the device here since the
3051 * adapter firmware does it for us.
3053 scsi_set_tag_type(sdev, tag_type);
3055 if (tag_type)
3056 scsi_activate_tcq(sdev, sdev->queue_depth);
3057 else
3058 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3059 } else
3060 tag_type = 0;
3061 } else
3062 tag_type = 0;
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065 return tag_type;
3069 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3070 * @dev: device struct
3071 * @buf: buffer
3073 * Return value:
3074 * number of bytes printed to buffer
3076 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3078 struct scsi_device *sdev = to_scsi_device(dev);
3079 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3080 struct ipr_resource_entry *res;
3081 unsigned long lock_flags = 0;
3082 ssize_t len = -ENXIO;
3084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3085 res = (struct ipr_resource_entry *)sdev->hostdata;
3086 if (res)
3087 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3089 return len;
3092 static struct device_attribute ipr_adapter_handle_attr = {
3093 .attr = {
3094 .name = "adapter_handle",
3095 .mode = S_IRUSR,
3097 .show = ipr_show_adapter_handle
3100 static struct device_attribute *ipr_dev_attrs[] = {
3101 &ipr_adapter_handle_attr,
3102 NULL,
3106 * ipr_biosparam - Return the HSC mapping
3107 * @sdev: scsi device struct
3108 * @block_device: block device pointer
3109 * @capacity: capacity of the device
3110 * @parm: Array containing returned HSC values.
3112 * This function generates the HSC parms that fdisk uses.
3113 * We want to make sure we return something that places partitions
3114 * on 4k boundaries for best performance with the IOA.
3116 * Return value:
3117 * 0 on success
3119 static int ipr_biosparam(struct scsi_device *sdev,
3120 struct block_device *block_device,
3121 sector_t capacity, int *parm)
3123 int heads, sectors;
3124 sector_t cylinders;
3126 heads = 128;
3127 sectors = 32;
3129 cylinders = capacity;
3130 sector_div(cylinders, (128 * 32));
3132 /* return result */
3133 parm[0] = heads;
3134 parm[1] = sectors;
3135 parm[2] = cylinders;
3137 return 0;
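/*
 * Illustrative sketch, not part of the driver: with the fixed geometry
 * above, one cylinder is 128 * 32 = 4096 sectors (2 MiB at 512 bytes per
 * sector), so cylinder-aligned partitions always start on 4 KiB-multiple
 * LBAs. Worked standalone for a sample capacity:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 143374000ULL;	/* sectors, sample */
	int heads = 128, sectors = 32;
	unsigned long long cylinders = capacity / (heads * sectors);

	/* Prints: C/H/S = 35003/128/32 */
	printf("C/H/S = %llu/%d/%d\n", cylinders, heads, sectors);
	return 0;
}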
3141 * ipr_slave_destroy - Unconfigure a SCSI device
3142 * @sdev: scsi device struct
3144 * Return value:
3145 * nothing
3147 static void ipr_slave_destroy(struct scsi_device *sdev)
3149 struct ipr_resource_entry *res;
3150 struct ipr_ioa_cfg *ioa_cfg;
3151 unsigned long lock_flags = 0;
3153 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3156 res = (struct ipr_resource_entry *) sdev->hostdata;
3157 if (res) {
3158 sdev->hostdata = NULL;
3159 res->sdev = NULL;
3161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3165 * ipr_slave_configure - Configure a SCSI device
3166 * @sdev: scsi device struct
3168 * This function configures the specified scsi device.
3170 * Return value:
3171 * 0 on success
3173 static int ipr_slave_configure(struct scsi_device *sdev)
3175 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3176 struct ipr_resource_entry *res;
3177 unsigned long lock_flags = 0;
3179 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3180 res = sdev->hostdata;
3181 if (res) {
3182 if (ipr_is_af_dasd_device(res))
3183 sdev->type = TYPE_RAID;
3184 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3185 sdev->scsi_level = 4;
3186 sdev->no_uld_attach = 1;
3188 if (ipr_is_vset_device(res)) {
3189 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3190 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3192 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3193 sdev->allow_restart = 1;
3194 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3197 return 0;
3201 * ipr_slave_alloc - Prepare for commands to a device.
3202 * @sdev: scsi device struct
3204 * This function saves a pointer to the resource entry
3205 * in the scsi device struct if the device exists. We
3206 * can then use this pointer in ipr_queuecommand when
3207 * handling new commands.
3209 * Return value:
3210 * 0 on success / -ENXIO if device does not exist
3212 static int ipr_slave_alloc(struct scsi_device *sdev)
3214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3215 struct ipr_resource_entry *res;
3216 unsigned long lock_flags;
3217 int rc = -ENXIO;
3219 sdev->hostdata = NULL;
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3223 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3224 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3225 (res->cfgte.res_addr.target == sdev->id) &&
3226 (res->cfgte.res_addr.lun == sdev->lun)) {
3227 res->sdev = sdev;
3228 res->add_to_ml = 0;
3229 res->in_erp = 0;
3230 sdev->hostdata = res;
3231 if (!ipr_is_naca_model(res))
3232 res->needs_sync_complete = 1;
3233 rc = 0;
3234 break;
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3240 return rc;
3244 * ipr_eh_host_reset - Reset the host adapter
3245 * @scsi_cmd: scsi command struct
3247 * Return value:
3248 * SUCCESS / FAILED
3250 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3252 struct ipr_ioa_cfg *ioa_cfg;
3253 int rc;
3255 ENTER;
3256 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3258 dev_err(&ioa_cfg->pdev->dev,
3259 "Adapter being reset as a result of error recovery.\n");
3261 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3262 ioa_cfg->sdt_state = GET_DUMP;
3264 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3266 LEAVE;
3267 return rc;
3270 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3272 int rc;
3274 spin_lock_irq(cmd->device->host->host_lock);
3275 rc = __ipr_eh_host_reset(cmd);
3276 spin_unlock_irq(cmd->device->host->host_lock);
3278 return rc;
3282 * ipr_device_reset - Reset the device
3283 * @ioa_cfg: ioa config struct
3284 * @res: resource entry struct
3286 * This function issues a device reset to the affected device.
3287 * If the device is a SCSI device, a LUN reset will be sent
3288 * to the device first. If that does not work, a target reset
3289 * will be sent.
3291 * Return value:
3292 * 0 on success / non-zero on failure
3294 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3295 struct ipr_resource_entry *res)
3297 struct ipr_cmnd *ipr_cmd;
3298 struct ipr_ioarcb *ioarcb;
3299 struct ipr_cmd_pkt *cmd_pkt;
3300 u32 ioasc;
3302 ENTER;
3303 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3304 ioarcb = &ipr_cmd->ioarcb;
3305 cmd_pkt = &ioarcb->cmd_pkt;
3307 ioarcb->res_handle = res->cfgte.res_handle;
3308 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3309 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3311 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3312 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3313 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3315 LEAVE;
3316 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3320 * ipr_eh_dev_reset - Reset the device
3321 * @scsi_cmd: scsi command struct
3323 * This function issues a device reset to the affected device.
3324 * A LUN reset will be sent to the device first. If that does
3325 * not work, a target reset will be sent.
3327 * Return value:
3328 * SUCCESS / FAILED
3330 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3332 struct ipr_cmnd *ipr_cmd;
3333 struct ipr_ioa_cfg *ioa_cfg;
3334 struct ipr_resource_entry *res;
3335 int rc;
3337 ENTER;
3338 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3339 res = scsi_cmd->device->hostdata;
3341 if (!res)
3342 return FAILED;
3345 * If we are currently going through reset/reload, return failed. This will force the
3346 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3347 * reset to complete
3349 if (ioa_cfg->in_reset_reload)
3350 return FAILED;
3351 if (ioa_cfg->ioa_is_dead)
3352 return FAILED;
3354 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3355 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3356 if (ipr_cmd->scsi_cmd)
3357 ipr_cmd->done = ipr_scsi_eh_done;
3361 res->resetting_device = 1;
3362 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3363 rc = ipr_device_reset(ioa_cfg, res);
3364 res->resetting_device = 0;
3366 LEAVE;
3367 return (rc ? FAILED : SUCCESS);
3370 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3372 int rc;
3374 spin_lock_irq(cmd->device->host->host_lock);
3375 rc = __ipr_eh_dev_reset(cmd);
3376 spin_unlock_irq(cmd->device->host->host_lock);
3378 return rc;
3382 * ipr_bus_reset_done - Op done function for bus reset.
3383 * @ipr_cmd: ipr command struct
3385 * This function is the op done function for a bus reset
3387 * Return value:
3388 * none
3390 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3393 struct ipr_resource_entry *res;
3395 ENTER;
3396 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3397 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3398 sizeof(res->cfgte.res_handle))) {
3399 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3400 break;
3405 * If abort has not completed, indicate the reset has, else call the
3406 * abort's done function to wake the sleeping eh thread
3408 if (ipr_cmd->sibling->sibling)
3409 ipr_cmd->sibling->sibling = NULL;
3410 else
3411 ipr_cmd->sibling->done(ipr_cmd->sibling);
3413 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3414 LEAVE;
3418 * ipr_abort_timeout - An abort task has timed out
3419 * @ipr_cmd: ipr command struct
3421 * This function handles when an abort task times out. If this
3422 * happens we issue a bus reset since we have resources tied
3423 * up that must be freed before returning to the midlayer.
3425 * Return value:
3426 * none
3428 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3430 struct ipr_cmnd *reset_cmd;
3431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3432 struct ipr_cmd_pkt *cmd_pkt;
3433 unsigned long lock_flags = 0;
3435 ENTER;
3436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3437 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3439 return;
3442 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3443 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3444 ipr_cmd->sibling = reset_cmd;
3445 reset_cmd->sibling = ipr_cmd;
3446 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3447 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3448 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3449 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3450 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3452 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454 LEAVE;
3458 * ipr_cancel_op - Cancel specified op
3459 * @scsi_cmd: scsi command struct
3461 * This function cancels specified op.
3463 * Return value:
3464 * SUCCESS / FAILED
3466 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3468 struct ipr_cmnd *ipr_cmd;
3469 struct ipr_ioa_cfg *ioa_cfg;
3470 struct ipr_resource_entry *res;
3471 struct ipr_cmd_pkt *cmd_pkt;
3472 u32 ioasc;
3473 int op_found = 0;
3475 ENTER;
3476 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3477 res = scsi_cmd->device->hostdata;
3479 /* If we are currently going through reset/reload, return failed.
3480 * This will force the mid-layer to call ipr_eh_host_reset,
3481 * which will then go to sleep and wait for the reset to complete
3483 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3484 return FAILED;
3485 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3486 return FAILED;
3488 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3489 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3490 ipr_cmd->done = ipr_scsi_eh_done;
3491 op_found = 1;
3492 break;
3496 if (!op_found)
3497 return SUCCESS;
3499 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3500 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3501 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3502 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3503 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3504 ipr_cmd->u.sdev = scsi_cmd->device;
3506 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3507 scsi_cmd->cmnd[0]);
3508 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3509 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3512 * If the abort task timed out and we sent a bus reset, we will get
3513 * one of the following responses to the abort
3515 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3516 ioasc = 0;
3517 ipr_trace;
3520 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3521 if (!ipr_is_naca_model(res))
3522 res->needs_sync_complete = 1;
3524 LEAVE;
3525 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3529 * ipr_eh_abort - Abort a single op
3530 * @scsi_cmd: scsi command struct
3532 * Return value:
3533 * SUCCESS / FAILED
3535 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3537 unsigned long flags;
3538 int rc;
3540 ENTER;
3542 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3543 rc = ipr_cancel_op(scsi_cmd);
3544 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3546 LEAVE;
3547 return rc;
3551 * ipr_handle_other_interrupt - Handle "other" interrupts
3552 * @ioa_cfg: ioa config struct
3553 * @int_reg: interrupt register
3555 * Return value:
3556 * IRQ_NONE / IRQ_HANDLED
3558 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3559 volatile u32 int_reg)
3561 irqreturn_t rc = IRQ_HANDLED;
3563 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3564 /* Mask the interrupt */
3565 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3567 /* Clear the interrupt */
3568 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3569 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3571 list_del(&ioa_cfg->reset_cmd->queue);
3572 del_timer(&ioa_cfg->reset_cmd->timer);
3573 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3574 } else {
3575 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3576 ioa_cfg->ioa_unit_checked = 1;
3577 else
3578 dev_err(&ioa_cfg->pdev->dev,
3579 "Permanent IOA failure. 0x%08X\n", int_reg);
3581 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3582 ioa_cfg->sdt_state = GET_DUMP;
3584 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3585 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3588 return rc;
3592 * ipr_isr - Interrupt service routine
3593 * @irq: irq number
3594 * @devp: pointer to ioa config struct
3595 * @regs: pt_regs struct
3597 * Return value:
3598 * IRQ_NONE / IRQ_HANDLED
3600 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3603 unsigned long lock_flags = 0;
3604 volatile u32 int_reg, int_mask_reg;
3605 u32 ioasc;
3606 u16 cmd_index;
3607 struct ipr_cmnd *ipr_cmd;
3608 irqreturn_t rc = IRQ_NONE;
3610 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3612 /* If interrupts are disabled, ignore the interrupt */
3613 if (!ioa_cfg->allow_interrupts) {
3614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3615 return IRQ_NONE;
3618 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3619 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3621 /* If an interrupt on the adapter did not occur, ignore it */
3622 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624 return IRQ_NONE;
3627 while (1) {
3628 ipr_cmd = NULL;
3630 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3631 ioa_cfg->toggle_bit) {
3633 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3634 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3636 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3637 ioa_cfg->errors_logged++;
3638 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3640 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3641 ioa_cfg->sdt_state = GET_DUMP;
3643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3644 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3645 return IRQ_HANDLED;
3648 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3650 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3652 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3654 list_del(&ipr_cmd->queue);
3655 del_timer(&ipr_cmd->timer);
3656 ipr_cmd->done(ipr_cmd);
3658 rc = IRQ_HANDLED;
3660 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3661 ioa_cfg->hrrq_curr++;
3662 } else {
3663 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3664 ioa_cfg->toggle_bit ^= 1u;
3668 if (ipr_cmd != NULL) {
3669 /* Clear the PCI interrupt */
3670 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3671 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3672 } else
3673 break;
3676 if (unlikely(rc == IRQ_NONE))
3677 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 return rc;
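/*
 * Illustrative sketch, not part of the driver: the ISR loop above drains
 * a host ring whose producer flips a toggle bit on every wrap; an entry
 * is new while its toggle bit matches the consumer's expected value.
 * Standalone model with a 4-slot ring and a made-up entry layout
 * (bit 0 = toggle, remaining bits = response handle):
 */
#include <stdio.h>
#include <stdint.h>

#define RING_SLOTS	4
#define TOGGLE		1u

int main(void)
{
	uint32_t ring[RING_SLOTS] = { 0 };
	uint32_t prod_toggle = 1, cons_toggle = 1;
	int head = 0, tail = 0, h;

	for (h = 10; h < 16; h++) {
		ring[head] = ((uint32_t)h << 1) | prod_toggle;	/* produce */
		if (++head == RING_SLOTS) {
			head = 0;
			prod_toggle ^= TOGGLE;	/* flip on wrap */
		}
		while ((ring[tail] & TOGGLE) == cons_toggle) {	/* consume */
			printf("handle %u\n", (unsigned)(ring[tail] >> 1));
			if (++tail == RING_SLOTS) {
				tail = 0;
				cons_toggle ^= TOGGLE;
			}
		}
	}
	return 0;
}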
3684 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3685 * @ioa_cfg: ioa config struct
3686 * @ipr_cmd: ipr command struct
3688 * Return value:
3689 * 0 on success / -1 on failure
3691 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3692 struct ipr_cmnd *ipr_cmd)
3694 int i;
3695 struct scatterlist *sglist;
3696 u32 length;
3697 u32 ioadl_flags = 0;
3698 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3699 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3700 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3702 length = scsi_cmd->request_bufflen;
3704 if (length == 0)
3705 return 0;
3707 if (scsi_cmd->use_sg) {
3708 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3709 scsi_cmd->request_buffer,
3710 scsi_cmd->use_sg,
3711 scsi_cmd->sc_data_direction);
3713 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3714 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3715 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3716 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3717 ioarcb->write_ioadl_len =
3718 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3719 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3720 ioadl_flags = IPR_IOADL_FLAGS_READ;
3721 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3722 ioarcb->read_ioadl_len =
3723 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3726 sglist = scsi_cmd->request_buffer;
3728 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3729 ioadl[i].flags_and_data_len =
3730 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3731 ioadl[i].address =
3732 cpu_to_be32(sg_dma_address(&sglist[i]));
3735 if (likely(ipr_cmd->dma_use_sg)) {
3736 ioadl[i-1].flags_and_data_len |=
3737 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3738 return 0;
3739 } else
3740 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3741 } else {
3742 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3743 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3744 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3745 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3746 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3747 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748 ioadl_flags = IPR_IOADL_FLAGS_READ;
3749 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3753 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3754 scsi_cmd->request_buffer, length,
3755 scsi_cmd->sc_data_direction);
3757 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3758 ipr_cmd->dma_use_sg = 1;
3759 ioadl[0].flags_and_data_len =
3760 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3761 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3762 return 0;
3763 } else
3764 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3767 return -1;
3771 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3772 * @scsi_cmd: scsi command struct
3774 * Return value:
3775 * task attributes
3777 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3779 u8 tag[2];
3780 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3782 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3783 switch (tag[0]) {
3784 case MSG_SIMPLE_TAG:
3785 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3786 break;
3787 case MSG_HEAD_TAG:
3788 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3789 break;
3790 case MSG_ORDERED_TAG:
3791 rc = IPR_FLAGS_LO_ORDERED_TASK;
3792 break;
3796 return rc;
3800 * ipr_erp_done - Process completion of ERP for a device
3801 * @ipr_cmd: ipr command struct
3803 * This function copies the sense buffer into the scsi_cmd
3804 * struct and pushes the scsi_done function.
3806 * Return value:
3807 * nothing
3809 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3811 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3812 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3814 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3816 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3817 scsi_cmd->result |= (DID_ERROR << 16);
3818 scmd_printk(KERN_ERR, scsi_cmd,
3819 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3820 } else {
3821 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3822 SCSI_SENSE_BUFFERSIZE);
3825 if (res) {
3826 if (!ipr_is_naca_model(res))
3827 res->needs_sync_complete = 1;
3828 res->in_erp = 0;
3830 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3831 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3832 scsi_cmd->scsi_done(scsi_cmd);
3836 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3837 * @ipr_cmd: ipr command struct
3839 * Return value:
3840 * none
3842 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3844 struct ipr_ioarcb *ioarcb;
3845 struct ipr_ioasa *ioasa;
3847 ioarcb = &ipr_cmd->ioarcb;
3848 ioasa = &ipr_cmd->ioasa;
3850 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3851 ioarcb->write_data_transfer_length = 0;
3852 ioarcb->read_data_transfer_length = 0;
3853 ioarcb->write_ioadl_len = 0;
3854 ioarcb->read_ioadl_len = 0;
3855 ioasa->ioasc = 0;
3856 ioasa->residual_data_len = 0;
3860 * ipr_erp_request_sense - Send request sense to a device
3861 * @ipr_cmd: ipr command struct
3863 * This function sends a request sense to a device as a result
3864 * of a check condition.
3866 * Return value:
3867 * nothing
3869 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3871 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3872 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3874 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3875 ipr_erp_done(ipr_cmd);
3876 return;
3879 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3881 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3882 cmd_pkt->cdb[0] = REQUEST_SENSE;
3883 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3884 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3885 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3886 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3888 ipr_cmd->ioadl[0].flags_and_data_len =
3889 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3890 ipr_cmd->ioadl[0].address =
3891 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3893 ipr_cmd->ioarcb.read_ioadl_len =
3894 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3895 ipr_cmd->ioarcb.read_data_transfer_length =
3896 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3898 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3899 IPR_REQUEST_SENSE_TIMEOUT * 2);
3903 * ipr_erp_cancel_all - Send cancel all to a device
3904 * @ipr_cmd: ipr command struct
3906 * This function sends a cancel all to a device to clear the
3907 * queue. If we are running TCQ on the device, QERR is set to 1,
3908 * which means all outstanding ops have been dropped on the floor.
3909 * Cancel all will return them to us.
3911 * Return value:
3912 * nothing
3914 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3916 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3917 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3918 struct ipr_cmd_pkt *cmd_pkt;
3920 res->in_erp = 1;
3922 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:		resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
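		/*
		 * The failing LBA does not fit in 32 bits, so build
		 * descriptor-format sense data (response code 0x72):
		 * bytes 8-9 start an information descriptor and the
		 * 64-bit LBA lands in its 8-byte information field.
		 */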
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 * 	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	if ((be32_to_cpu(ioasa->ioasc_specific) &
	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
		return 0;

	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
		     SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (ipr_is_gscsi(res))
		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
	else
		ipr_gen_sense(ipr_cmd);
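	/*
	 * Translate the masked IOASC into a mid-layer result and decide
	 * whether the device needs a sync complete or further ERP.
	 */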
	switch (ioasc & IPR_IOASC_IOASC_MASK) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:		done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
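	/*
	 * The memory barrier makes the completed IOARCB visible to the
	 * adapter before its bus address is written to the IOARRIN
	 * register, which is what actually starts the op.
	 */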
	if (likely(rc == 0)) {
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char * ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	u8 rev_id;
	int i;

	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			if (rev_id < 4) {
				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
					if (__is_processor(ipr_blocked_processors[i]))
						return 1;
				}
			}
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);
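	/*
	 * Re-arm the host controlled async messages: hand the first
	 * IPR_NUM_LOG_HCAMS free buffers back to the adapter as
	 * log-data HCAMs and the remainder as config-change HCAMs.
	 */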
	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:			vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;
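	/*
	 * One Set Supported Devices is issued per disk; ipr_cmd->u.res
	 * remembers where the walk stopped, so each completion re-enters
	 * this step and list_for_each_entry_continue() resumes there.
	 */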
	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_setup_write_cache - Disable write cache if needed
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the adapter's write cache to the desired setting
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	if (ioa_cfg->cache_state != CACHE_DISABLED)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:		minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;
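	/*
	 * The mode data length byte excludes itself, so hdr.length + 1
	 * is the full buffer size; subtracting the 4-byte parameter
	 * header and any block descriptors leaves just the mode pages.
	 */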
	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 1 of the MODE SELECT CDB
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm, u32 dma_addr,
				  u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
}
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
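	/*
	 * Compute the full length to send (the mode data length byte
	 * excludes itself), then zero that field: it is reserved in a
	 * MODE SELECT parameter list.
	 */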
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_setup_write_cache;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
}
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_setup_write_cache;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;
	int found, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);
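	/*
	 * Diff the freshly fetched config table against the previous
	 * resource list: entries found in both move back to used_res_q,
	 * new entries get a free resource and are flagged add_to_ml.
	 */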
	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res);
			res->add_to_ml = 1;
		}

		if (found)
			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	}

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry CDB flags (e.g. the EVPD bit)
 * @page:	inquiry page code to fetch
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	size of the response buffer
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;

	ENTER;

	if (!ipr_inquiry_page_supported(page0, 1))
		ioa_cfg->cache_state = CACHE_NONE;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
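	/*
	 * CDB bytes 2-5 carry the bus address of the host RRQ, most
	 * significant byte first; bytes 7-8 carry the queue size in
	 * bytes (one u32 entry per command block).
	 */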
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
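	/*
	 * If the adapter is already transitioning to the operational
	 * state, just unmask its interrupts and fall through to the
	 * identify HRRQ step without knocking the doorbell again.
	 */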
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:		ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;
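	/*
	 * Build a WRITE BUFFER in download-and-save mode; CDB bytes
	 * 6-8 hold the 24-bit image length, big-endian.
	 */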
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
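
/*
 * The handlers above are driven by the PCI error recovery core (EEH on
 * pSeries, AER on PCI Express). A sketch of the sequence for a
 * recoverable error, per Documentation/pci-error-recovery.txt:
 *
 *	->error_detected(pdev, pci_channel_io_frozen)
 *		ipr_pci_frozen(): mask interrupts, park the reset job
 *	->slot_reset(pdev)
 *		ipr_pci_slot_reset(): restore config space, reset the IOA
 *
 * For an unrecoverable slot, error_detected() is instead called with
 * pci_channel_io_perm_failure and ipr_pci_perm_failure() takes the IOA
 * offline. This is an outline of the core's state machine, not a
 * guarantee of exact ordering.
 */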

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
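
/*
 * Layout note (inferred from the offsetof() math above; see struct
 * ipr_cmnd in ipr.h for the authoritative definition): each command
 * block is one physically contiguous pci_pool chunk, with the IOARCB at
 * offset 0 (hence ioarcb_host_pci_addr == dma_addr) and the other
 * per-command areas addressed by adding their structure offsets to the
 * same base DMA handle:
 *
 *	dma_addr + offsetof(struct ipr_cmnd, ioasa)        -> status area
 *	dma_addr + offsetof(struct ipr_cmnd, ioadl)        -> S/G list
 *	dma_addr + offsetof(struct ipr_cmnd, sense_buffer) -> sense data
 */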

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
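
/*
 * ipr_alloc_mem() uses the standard kernel unwind idiom: each failed
 * allocation jumps to a label that releases everything acquired so far,
 * in reverse order, so there is a single success path and no partially
 * initialized state leaks. A minimal sketch of the pattern (hypothetical
 * helpers, not driver API):
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 *  out_free_a:
 *	free_a(a);
 *  out:
 *	return -ENOMEM;
 */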

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
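
/*
 * ipr_max_speed is a module parameter (declared with the file's other
 * parameters); the bounds check above falls back to the U160 rate if it
 * is out of range for ipr_max_bus_speeds[]. Assuming the parameter is
 * exported as "max_speed", a hypothetical load limiting all buses to
 * U160 would be:
 *
 *	modprobe ipr max_speed=1
 */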

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
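
/*
 * From here on, ioa_cfg->regs holds ready-to-use __iomem addresses: the
 * chip-specific offsets from ipr_chip_cfg[] added to the ioremap()ed
 * BAR. The rest of the driver accesses the IOA with plain MMIO
 * accessors, in the style of (a sketch of the pattern used by the
 * interrupt masking code and by ipr_probe_ioa() below):
 *
 *	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 *	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 */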

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}
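
/*
 * In practice the ipr_chip[] scan above is a fallback: every entry of
 * ipr_pci_table below carries a pointer to its ipr_chip_cfg[] entry in
 * driver_data, so the early-return cast is the path normally taken.
 * The scan presumably only matters for IDs bound without driver_data,
 * e.g. ones added dynamically through sysfs.
 */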

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
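
/*
 * The flush_scheduled_work() above matters: ioa_cfg->work_q was set up
 * with INIT_WORK(..., ipr_worker_thread, ...) in ipr_init_ioa_cfg(), and
 * the worker takes host_lock and walks the resource queues, so it must
 * be drained before ipr_free_all_resources() tears that state down.
 */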

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
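
/*
 * MODULE_DEVICE_TABLE() exports the ID list above in the module image so
 * userspace (modprobe, via the PCI modalias mechanism) can autoload ipr
 * when a matching adapter appears; the PCI core matches probe() calls
 * against the same table through ipr_driver.id_table below.
 */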

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}
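
/*
 * pci_module_init() is the older name for PCI driver registration; on
 * kernels of this vintage it is a thin wrapper equivalent to calling
 * pci_register_driver(&ipr_driver), which is what the matching
 * pci_unregister_driver() in ipr_exit() pairs with.
 */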

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);