/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
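/*
 * Example (illustrative sketch, not part of the driver): resolving a PCI
 * vendor/device pair to its register layout amounts to a linear scan of
 * ipr_chip[]. The helper name below is hypothetical and the field names
 * (vendor, device, cfg) are assumed from this table's initializers; the
 * driver's real probe path, outside this extract, does the equivalent.
 *
 *	static const struct ipr_chip_cfg_t *
 *	example_find_chip_cfg(u16 vendor, u16 device)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *			if (ipr_chip[i].vendor == vendor &&
 *			    ipr_chip[i].device == device)
 *				return ipr_chip[i].cfg;
 *		return NULL;
 *	}
 */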
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
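/*
 * Example usage (illustrative): the parameters above are given at module
 * load time, e.g.
 *
 *	# modprobe ipr max_speed=2 log_level=2 enable_cache=1
 *
 * Since the permission argument to module_param_named() is 0, the values
 * are not exposed for later modification under /sys/module/ipr/parameters.
 */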
/*
 *  A constant array of IOASCs/URCs/Error Messages
 */
static const
struct ipr_error_table_t ipr_error_table[] = {
	/* The numeric {ioasc, log_hcam, log_vpd} initializer that opens
	   each entry is not present in this extract; only the URC message
	   strings survive. */
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	"FFFE: Soft device bus error recovered by the IOA"},
	"4101: Soft device bus fabric error"},
	"FFF9: Device sector reassign successful"},
	"FFF7: Media error recovered by device rewrite procedures"},
	"7001: IOA sector reassignment successful"},
	"FFF9: Soft media error. Sector reassignment recommended"},
	"FFF7: Media error recovered by IOA rewrite procedures"},
	"FF3D: Soft PCI bus error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the device"},
	"FF3D: Soft IOA error recovered by the IOA"},
	"FFFA: Undefined device response recovered by the IOA"},
	"FFF6: Device bus error, message or command phase"},
	"FFFE: Task Management Function failed"},
	"FFF6: Failure prediction threshold exceeded"},
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"Synchronization required"},
	"Not ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	"FFF3: Disk media format bad"},
	"3002: Addressed device failed to respond to selection"},
	"3100: Device bus error"},
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	"4100: Hard device bus fabric error"},
	"9000: IOA reserved area data check"},
	"9001: IOA reserved area invalid data pattern"},
	"9002: IOA reserved area LRC error"},
	"102E: Out of alternate sectors for disk storage"},
	"FFF4: Data transfer underlength error"},
	"FFF4: Data transfer overlength error"},
	"3400: Logical unit failure"},
	"FFF4: Device microcode is corrupt"},
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	"FFF4: Disk device problem"},
	"8150: Permanent IOA failure"},
	"3010: Disk device returned wrong response to IOA"},
	"8151: IOA microcode error"},
	"Device bus status error"},
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	"8008: A permanent cache battery pack failure occurred"},
	"9090: Disk unit has been modified after the last known status"},
	"9081: IOA detected device error"},
	"9082: IOA detected device error"},
	"3110: Device bus error, message or command phase"},
	"3110: SAS Command / Task Management Function failed"},
	"9091: Incorrect hardware configuration change has been detected"},
	"9073: Invalid multi-adapter configuration"},
	"4010: Incorrect connection between cascaded expanders"},
	"4020: Connections exceed IOA design limits"},
	"4030: Incorrect multipath connection"},
	"4110: Unsupported enclosure function"},
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"9031: Array protection temporarily suspended, protection resuming"},
	"9040: Array protection temporarily suspended, protection resuming"},
	"3140: Device bus not ready to ready transition"},
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	"FFFB: SCSI bus was reset by another initiator"},
	"3029: A device replacement has occurred"},
	"9051: IOA cache data exists for a missing or failed device"},
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	"9025: Disk unit is not supported at its physical location"},
	"3020: IOA detected a SCSI bus configuration error"},
	"3150: SCSI bus configuration error"},
	"9074: Asymmetric advanced function disk configuration"},
	"4040: Incomplete multipath connection between IOA and enclosure"},
	"4041: Incomplete multipath connection between enclosure and device"},
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	"9076: Configuration error, missing remote IOA"},
	"4050: Enclosure does not support a required multipath function"},
	"9041: Array protection temporarily suspended"},
	"9042: Corrupt array parity detected on specified device"},
	"9030: Array no longer protected due to missing or failed disk unit"},
	"9071: Link operational transition"},
	"9072: Link not operational transition"},
	"9032: Array exposed but still protected"},
	"4061: Multipath redundancy level got better"},
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	"9008: IOA does not support functions expected by devices"},
	"9010: Cache data associated with attached devices cannot be found"},
	"9011: Cache data belongs to devices other than those attached"},
	"9020: Array missing 2 or more devices with only 1 device present"},
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	"9022: Exposed array is missing a required device"},
	"9023: Array member(s) not at required physical locations"},
	"9024: Array not functional due to present hardware configuration"},
	"9026: Array not functional due to present hardware configuration"},
	"9027: Array is missing a device and parity is out of sync"},
	"9028: Maximum number of arrays already exist"},
	"9050: Required cache data cannot be located for a disk unit"},
	"9052: Cache data exists for a device that has been modified"},
	"9054: IOA resources not available due to previous problems"},
	"9092: Disk unit requires initialization before use"},
	"9029: Incorrect hardware configuration change has been detected"},
	"9060: One or more disk pairs are missing from an array"},
	"9061: One or more disks are missing from an array"},
	"9062: One or more disks are missing from an array"},
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
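/*
 * Example (illustrative): callers record one trace entry per command
 * state transition, as this file does on submission and completion:
 *
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 *	...
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 *
 * When CONFIG_SCSI_IPR_TRACE is not set, the do { } while(0) stub above
 * compiles the calls away while still swallowing the trailing semicolon.
 */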
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts; the readback forces the posted
	   writes out to the adapter before we proceed */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
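/*
 * Example (illustrative): a driver-initiated op goes through ipr_do_req
 * with its completion routine and a timeout handler, e.g.
 *
 *	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, ipr_timeout,
 *		   IPR_INTERNAL_TIMEOUT);
 *
 * IPR_INTERNAL_TIMEOUT is assumed to be defined in ipr.h; the write to
 * ioarrin_reg is what actually hands the IOARCB to the adapter, which is
 * why the mb() above orders the descriptor setup before the doorbell.
 */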
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
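/*
 * Example (illustrative): during adapter bring-up the driver primes one
 * HCAM of each type so the IOA always has a buffer to post asynchronous
 * events into (hostrcb allocation happens outside this extract):
 *
 *	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
 *	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 */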
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
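/*
 * Illustrative flow of a configuration change notification (CCN),
 * summarized from the code above:
 *
 *	ipr_send_hcam(CONFIG_CHANGE)     driver posts an HCAM buffer
 *	  -> adapter completes it        a device was added or removed
 *	  -> ipr_process_ccn()           done routine, checks the IOASC
 *	  -> ipr_handle_config_change()  updates the resource queues
 *	  -> ipr_send_hcam(...)          the buffer is re-posted at once
 *
 * so exactly one CCN hostrcb is outstanding whenever allow_cmds is set.
 */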
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
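/*
 * Worked example (illustrative): IPR_IOASC_IOASC_MASK strips low-order
 * qualifier bits before comparison, so two IOASC values differing only
 * in those bits map to the same table row; any value with no matching
 * row falls through to index 0, the "unknown error" entry. Specific
 * IOASC bit layouts are defined in ipr.h, not shown in this extract.
 */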
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			   "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
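/*
 * Worked example (illustrative): for a 16-bit wide bus whose enclosure
 * entry in ipr_ses_table limits it to max_bus_speed_limit = 160, the
 * computation is (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz
 * units this function returns, which on a wide bus corresponds to a
 * 160 MB/sec transfer rate.
 */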
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		/* Double the delay each pass for an exponential backoff */
		delay += delay;
	}
	return -EIO;
}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
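/*
 * Illustrative summary of the mailbox handshake above (derived from the
 * code, not from separate hardware documentation):
 *
 *	host: set RESET_ALERT | IO_DEBUG_ALERT     enter LDUMP state
 *	ioa:  raise IO_DEBUG_ACKNOWLEDGE
 *	host: clear ack, write start address to the mailbox,
 *	      clear RESET_ALERT                    address is valid
 *	per word:
 *	      ioa:  raise ack with data in the mailbox
 *	      host: read mailbox, clear ack (except on the final word)
 *	host: set RESET_ALERT, clear IO_DEBUG_ALERT, clear ack
 *	host: poll sense_uproc_interrupt_reg until RESET_ALERT drops
 */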
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
    struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
    struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
    unsigned long lock_flags = 0;
    int i;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    ioa_cfg->dump = NULL;
    ioa_cfg->sdt_state = INACTIVE;
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    for (i = 0; i < dump->ioa_dump.next_page_index; i++)
        free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

    kfree(dump);
}
/**
 * ipr_worker_thread - Worker thread
 * @data:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(void *data)
{
    unsigned long lock_flags;
    struct ipr_resource_entry *res;
    struct scsi_device *sdev;
    struct ipr_dump *dump;
    struct ipr_ioa_cfg *ioa_cfg = data;
    u8 bus, target, lun;
    int did_work;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    if (ioa_cfg->sdt_state == GET_DUMP) {
        dump = ioa_cfg->dump;
        if (!dump) {
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            return;
        }
        kref_get(&dump->kref);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        ipr_get_ioa_dump(ioa_cfg, dump);
        kref_put(&dump->kref, ipr_release_dump);

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ioa_cfg->sdt_state == DUMP_OBTAINED)
            ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return;
    }

restart:
    do {
        did_work = 0;
        if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            return;
        }

        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
            if (res->del_from_ml && res->sdev) {
                did_work = 1;
                sdev = res->sdev;
                if (!scsi_device_get(sdev)) {
                    list_move_tail(&res->queue, &ioa_cfg->free_res_q);
                    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                    scsi_remove_device(sdev);
                    scsi_device_put(sdev);
                    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                }
                break;
            }
        }
    } while (did_work);

    list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
        if (res->add_to_ml) {
            bus = res->cfgte.res_addr.bus;
            target = res->cfgte.res_addr.target;
            lun = res->cfgte.res_addr.lun;
            res->add_to_ml = 0;
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            scsi_add_device(ioa_cfg->host, bus, target, lun);
            spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
            goto restart;
        }
    }

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset into the trace buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
                  loff_t off, size_t count)
{
    struct class_device *cdev = container_of(kobj, struct class_device, kobj);
    struct Scsi_Host *shost = class_to_shost(cdev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    int size = IPR_TRACE_SIZE;
    char *src = (char *)ioa_cfg->trace;

    if (off > size)
        return 0;
    if (off + count > size) {
        size -= off;
        count = size;
    }

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    memcpy(buf, &src[off], count);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return count;
}

static struct bin_attribute ipr_trace_attr = {
    .attr = {
        .name = "trace",
        .mode = S_IRUGO,
    },
    .size = 0,
    .read = ipr_read_trace,
};
#endif
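/*
 * Usage note (illustrative, not from the original source): with
 * CONFIG_SCSI_IPR_TRACE enabled, the trace buffer is exposed as a
 * binary sysfs attribute on the Scsi_Host class device. Assuming the
 * adapter is host0 and the attribute name "trace" filled in above,
 * it could be captured with something like:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4k
 */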
static const struct {
    enum ipr_cache_state state;
    char *name;
} cache_state [] = {
    { CACHE_NONE, "none" },
    { CACHE_DISABLED, "disabled" },
    { CACHE_ENABLED, "enabled" }
};

/**
 * ipr_show_write_caching - Show the write caching attribute
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    int i, len = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
        if (cache_state[i].state == ioa_cfg->cache_state) {
            len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
            break;
        }
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return len;
}
/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_write_caching(struct class_device *class_dev,
                    const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    enum ipr_cache_state new_state = CACHE_INVALID;
    int i;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;
    if (ioa_cfg->cache_state == CACHE_NONE)
        return -EINVAL;

    for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
        if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
            new_state = cache_state[i].state;
            break;
        }
    }

    if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
        return -EINVAL;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (ioa_cfg->cache_state == new_state) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return count;
    }

    ioa_cfg->cache_state = new_state;
    dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
         new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
    if (!ioa_cfg->in_reset_reload)
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

    return count;
}

static struct class_device_attribute ipr_ioa_cache_attr = {
    .attr = {
        .name =	"write_cache",
        .mode =	S_IRUGO | S_IWUSR,
    },
    .show = ipr_show_write_caching,
    .store = ipr_store_write_caching
};
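/*
 * Usage sketch (illustrative): the write_cache attribute accepts the
 * strings matched against cache_state[] above, so an administrator
 * could toggle the cache with, e.g.:
 *
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 *	cat /sys/class/scsi_host/host0/write_cache
 *
 * The store path triggers a normal adapter shutdown/reset, so the
 * write blocks until the reset/reload completes.
 */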
/**
 * ipr_show_fw_version - Show the firmware version
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
    unsigned long lock_flags = 0;
    int len;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
               ucode_vpd->major_release, ucode_vpd->card_type,
               ucode_vpd->minor_release[0],
               ucode_vpd->minor_release[1]);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return len;
}

static struct class_device_attribute ipr_fw_version_attr = {
    .attr = {
        .name =	"fw_version",
        .mode =	S_IRUGO,
    },
    .show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    int len;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct class_device *class_dev,
                   const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return strlen(buf);
}

static struct class_device_attribute ipr_log_level_attr = {
    .attr = {
        .name =	"log_level",
        .mode =	S_IRUGO | S_IWUSR,
    },
    .show = ipr_show_log_level,
    .store = ipr_store_log_level
};
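/*
 * Usage sketch (illustrative): log_level takes a decimal value parsed
 * with simple_strtoul(), e.g.:
 *
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */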
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
                     const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    int rc = count;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    ioa_cfg->errors_logged = 0;
    ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

    if (ioa_cfg->in_reset_reload) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

        /* Wait for a second for any errors to be logged */
        msleep(1000);
    } else {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return -EIO;
    }

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
        rc = -EIO;
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return rc;
}

static struct class_device_attribute ipr_diagnostics_attr = {
    .attr = {
        .name =	"run_diagnostics",
        .mode =	S_IWUSR,
    },
    .store = ipr_store_diagnostics
};
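/*
 * Usage sketch (illustrative): any write kicks off the diagnostic
 * reset; the write returns the byte count on a clean run and -EIO if
 * the adapter logged errors or is still resetting:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */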
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags = 0;
    int len;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (ioa_cfg->ioa_is_dead)
        len = snprintf(buf, PAGE_SIZE, "offline\n");
    else
        len = snprintf(buf, PAGE_SIZE, "online\n");
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
                       const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags;
    int result = count;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
        ioa_cfg->ioa_is_dead = 0;
        ioa_cfg->reset_retries = 0;
        ioa_cfg->in_ioa_bringdown = 0;
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

    return result;
}

static struct class_device_attribute ipr_ioa_state_attr = {
    .attr = {
        .name =	"state",
        .mode =	S_IRUGO | S_IWUSR,
    },
    .show = ipr_show_adapter_state,
    .store = ipr_store_adapter_state
};

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
                       const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    unsigned long lock_flags;
    int result = count;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (!ioa_cfg->in_reset_reload)
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

    return result;
}

static struct class_device_attribute ipr_ioa_reset_attr = {
    .attr = {
        .name =	"reset_host",
        .mode =	S_IWUSR,
    },
    .store = ipr_store_reset_adapter
};
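/*
 * Usage sketch (illustrative): "echo online > .../state" revives a
 * dead adapter, while any write to reset_host forces a normal
 * shutdown and re-initialization:
 *
 *	echo online > /sys/class/scsi_host/host0/state
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */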
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
    int sg_size, order, bsize_elem, num_elem, i, j;
    struct ipr_sglist *sglist;
    struct scatterlist *scatterlist;
    struct page *page;

    /* Get the minimum size per scatter/gather element */
    sg_size = buf_len / (IPR_MAX_SGLIST - 1);

    /* Get the actual size per element */
    order = get_order(sg_size);

    /* Determine the actual number of bytes per element */
    bsize_elem = PAGE_SIZE * (1 << order);

    /* Determine the actual number of sg entries needed */
    if (buf_len % bsize_elem)
        num_elem = (buf_len / bsize_elem) + 1;
    else
        num_elem = buf_len / bsize_elem;

    /* Allocate a scatter/gather list for the DMA */
    sglist = kzalloc(sizeof(struct ipr_sglist) +
             (sizeof(struct scatterlist) * (num_elem - 1)),
             GFP_KERNEL);

    if (sglist == NULL)
        return NULL;

    scatterlist = sglist->scatterlist;

    sglist->order = order;
    sglist->num_sg = num_elem;

    /* Allocate a bunch of sg elements */
    for (i = 0; i < num_elem; i++) {
        page = alloc_pages(GFP_KERNEL, order);
        if (!page) {
            /* Free up what we already allocated */
            for (j = i - 1; j >= 0; j--)
                __free_pages(scatterlist[j].page, order);
            kfree(sglist);
            return NULL;
        }

        scatterlist[i].page = page;
    }

    return sglist;
}
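/*
 * Worked sizing example (illustrative; assumes IPR_MAX_SGLIST is 64
 * and 4K pages): for a 1 MiB image, sg_size = 1048576 / 63 = 16644
 * bytes, get_order(16644) = 3, so bsize_elem = 4096 * 8 = 32768 and
 * num_elem = 1048576 / 32768 = 32 scatter/gather elements.
 */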
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
    int i;

    for (i = 0; i < sglist->num_sg; i++)
        __free_pages(sglist->scatterlist[i].page, sglist->order);

    kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
                 u8 *buffer, u32 len)
{
    int bsize_elem, i, result = 0;
    struct scatterlist *scatterlist;
    void *kaddr;

    /* Determine the actual number of bytes per element */
    bsize_elem = PAGE_SIZE * (1 << sglist->order);

    scatterlist = sglist->scatterlist;

    for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
        kaddr = kmap(scatterlist[i].page);
        memcpy(kaddr, buffer, bsize_elem);
        kunmap(scatterlist[i].page);

        scatterlist[i].length = bsize_elem;
    }

    if (len % bsize_elem) {
        kaddr = kmap(scatterlist[i].page);
        memcpy(kaddr, buffer, len % bsize_elem);
        kunmap(scatterlist[i].page);

        scatterlist[i].length = len % bsize_elem;
    }

    sglist->buffer_len = len;
    return result;
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
                  struct ipr_sglist *sglist)
{
    struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
    struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
    struct scatterlist *scatterlist = sglist->scatterlist;
    int i;

    ipr_cmd->dma_use_sg = sglist->num_dma_sg;
    ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
    ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
    ioarcb->write_ioadl_len =
        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

    for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
        ioadl[i].flags_and_data_len =
            cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
        ioadl[i].address =
            cpu_to_be32(sg_dma_address(&scatterlist[i]));
    }

    ioadl[i-1].flags_and_data_len |=
        cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
                struct ipr_sglist *sglist)
{
    unsigned long lock_flags;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    if (ioa_cfg->ucode_sglist) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        dev_err(&ioa_cfg->pdev->dev,
            "Microcode download already in progress\n");
        return -EIO;
    }

    sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
                    sglist->num_sg, DMA_TO_DEVICE);

    if (!sglist->num_dma_sg) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        dev_err(&ioa_cfg->pdev->dev,
            "Failed to map microcode download buffer!\n");
        return -EIO;
    }

    ioa_cfg->ucode_sglist = sglist;
    ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    ioa_cfg->ucode_sglist = NULL;
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct class_device *class_dev,
                   const char *buf, size_t count)
{
    struct Scsi_Host *shost = class_to_shost(class_dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    struct ipr_ucode_image_header *image_hdr;
    const struct firmware *fw_entry;
    struct ipr_sglist *sglist;
    char fname[100];
    u8 *src;
    int len, result, dnld_size;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    len = snprintf(fname, 99, "%s", buf);
    fname[len-1] = '\0';

    if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
        dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
        return -EIO;
    }

    image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

    if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
        (ioa_cfg->vpd_cbs->page3_data.card_type &&
         ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
        dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
        release_firmware(fw_entry);
        return -EINVAL;
    }

    src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
    dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
    sglist = ipr_alloc_ucode_buffer(dnld_size);

    if (!sglist) {
        dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
        release_firmware(fw_entry);
        return -ENOMEM;
    }

    result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

    if (result) {
        dev_err(&ioa_cfg->pdev->dev,
            "Microcode buffer copy to DMA buffer failed\n");
        goto out;
    }

    result = ipr_update_ioa_ucode(ioa_cfg, sglist);

    if (!result)
        result = count;
out:
    ipr_free_ucode_buffer(sglist);
    release_firmware(fw_entry);
    return result;
}

static struct class_device_attribute ipr_update_fw_attr = {
    .attr = {
        .name =	"update_fw",
        .mode =	S_IWUSR,
    },
    .store = ipr_store_update_fw
};
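/*
 * Usage sketch (illustrative): the buffer written to update_fw is
 * treated as a firmware file name for request_firmware(), so the image
 * must live where the firmware loader can find it (e.g. /lib/firmware):
 *
 *	echo my_ipr_image > /sys/class/scsi_host/host0/update_fw
 *
 * "my_ipr_image" is a placeholder name, not a real microcode file.
 */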
static struct class_device_attribute *ipr_ioa_attrs[] = {
    &ipr_fw_version_attr,
    &ipr_log_level_attr,
    &ipr_diagnostics_attr,
    &ipr_ioa_state_attr,
    &ipr_ioa_reset_attr,
    &ipr_update_fw_attr,
    &ipr_ioa_cache_attr,
    NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
                 loff_t off, size_t count)
{
    struct class_device *cdev = container_of(kobj, struct class_device, kobj);
    struct Scsi_Host *shost = class_to_shost(cdev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    struct ipr_dump *dump;
    unsigned long lock_flags = 0;
    u8 *src;
    int len;
    size_t rc = count;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    dump = ioa_cfg->dump;

    if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return 0;
    }
    kref_get(&dump->kref);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    if (off > dump->driver_dump.hdr.len) {
        kref_put(&dump->kref, ipr_release_dump);
        return 0;
    }

    if (off + count > dump->driver_dump.hdr.len) {
        count = dump->driver_dump.hdr.len - off;
        rc = count;
    }

    if (count && off < sizeof(dump->driver_dump)) {
        if (off + count > sizeof(dump->driver_dump))
            len = sizeof(dump->driver_dump) - off;
        else
            len = count;
        src = (u8 *)&dump->driver_dump + off;
        memcpy(buf, src, len);
        buf += len;
        off += len;
        count -= len;
    }

    off -= sizeof(dump->driver_dump);

    if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
        if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
            len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
        else
            len = count;
        src = (u8 *)&dump->ioa_dump + off;
        memcpy(buf, src, len);
        buf += len;
        off += len;
        count -= len;
    }

    off -= offsetof(struct ipr_ioa_dump, ioa_data);

    while (count) {
        if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
            len = PAGE_ALIGN(off) - off;
        else
            len = count;
        src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
        src += off & ~PAGE_MASK;
        memcpy(buf, src, len);
        buf += len;
        off += len;
        count -= len;
    }

    kref_put(&dump->kref, ipr_release_dump);
    return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
    struct ipr_dump *dump;
    unsigned long lock_flags = 0;

    dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

    if (!dump) {
        ipr_err("Dump memory allocation failed\n");
        return -ENOMEM;
    }

    kref_init(&dump->kref);
    dump->ioa_cfg = ioa_cfg;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    if (INACTIVE != ioa_cfg->sdt_state) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        kfree(dump);
        return 0;
    }

    ioa_cfg->dump = dump;
    ioa_cfg->sdt_state = WAIT_FOR_DUMP;
    if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
        ioa_cfg->dump_taken = 1;
        schedule_work(&ioa_cfg->work_q);
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
    struct ipr_dump *dump;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    dump = ioa_cfg->dump;
    if (!dump) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return 0;
    }

    ioa_cfg->dump = NULL;
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    kref_put(&dump->kref, ipr_release_dump);
    return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
                  loff_t off, size_t count)
{
    struct class_device *cdev = container_of(kobj, struct class_device, kobj);
    struct Scsi_Host *shost = class_to_shost(cdev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
    int rc;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    if (buf[0] == '1')
        rc = ipr_alloc_dump(ioa_cfg);
    else if (buf[0] == '0')
        rc = ipr_free_dump(ioa_cfg);
    else
        return -EINVAL;

    if (rc)
        return rc;
    else
        return count;
}

static struct bin_attribute ipr_dump_attr = {
    .attr = {
        .name = "dump",
        .mode = S_IRUSR | S_IWUSR,
    },
    .size = 0,
    .read = ipr_read_dump,
    .write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
    struct ipr_resource_entry *res;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = (struct ipr_resource_entry *)sdev->hostdata;

    if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
        qdepth = IPR_MAX_CMD_PER_ATA_LUN;
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
    return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
    struct ipr_resource_entry *res;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = (struct ipr_resource_entry *)sdev->hostdata;

    if (res) {
        if (ipr_is_gscsi(res) && sdev->tagged_supported) {
            /*
             * We don't bother quiescing the device here since the
             * adapter firmware does it for us.
             */
            scsi_set_tag_type(sdev, tag_type);

            if (tag_type)
                scsi_activate_tcq(sdev, sdev->queue_depth);
            else
                scsi_deactivate_tcq(sdev, sdev->queue_depth);
        } else
            tag_type = 0;
    } else
        tag_type = 0;

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct scsi_device *sdev = to_scsi_device(dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
    struct ipr_resource_entry *res;
    unsigned long lock_flags = 0;
    ssize_t len = -ENXIO;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = (struct ipr_resource_entry *)sdev->hostdata;
    if (res)
        len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
    .attr = {
        .name =	"adapter_handle",
        .mode =	S_IRUSR,
    },
    .show = ipr_show_adapter_handle
};

static struct device_attribute *ipr_dev_attrs[] = {
    &ipr_adapter_handle_attr,
    NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
             struct block_device *block_device,
             sector_t capacity, int *parm)
{
    int heads, sectors;
    sector_t cylinders;

    heads = 128;
    sectors = 32;

    cylinders = capacity;
    sector_div(cylinders, (128 * 32));

    /* return result */
    parm[0] = heads;
    parm[1] = sectors;
    parm[2] = cylinders;

    return 0;
}
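/*
 * Worked example (illustrative): with 128 heads and 32 sectors/track,
 * one cylinder is 128 * 32 = 4096 sectors. A 4 GiB device of 8388608
 * sectors therefore reports 8388608 / 4096 = 2048 cylinders, and any
 * partition that starts on a cylinder boundary is aligned to a
 * 4096-sector (2 MiB) boundary, which satisfies the 4k alignment the
 * comment above asks for.
 */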
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
    struct Scsi_Host *shost = dev_to_shost(&starget->dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
    struct ipr_resource_entry *res;

    list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
        if ((res->cfgte.res_addr.bus == starget->channel) &&
            (res->cfgte.res_addr.target == starget->id) &&
            (res->cfgte.res_addr.lun == 0)) {
            return res;
        }
    }

    return NULL;
}

static struct ata_port_info sata_port_info;
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
    struct Scsi_Host *shost = dev_to_shost(&starget->dev);
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
    struct ipr_sata_port *sata_port;
    struct ata_port *ap;
    struct ipr_resource_entry *res;
    unsigned long lock_flags;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = ipr_find_starget(starget);
    starget->hostdata = NULL;

    if (res && ipr_is_gata(res)) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
        if (!sata_port)
            return -ENOMEM;

        ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
        if (ap) {
            spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
            sata_port->ioa_cfg = ioa_cfg;
            sata_port->ap = ap;
            sata_port->res = res;

            res->sata_port = sata_port;
            ap->private_data = sata_port;
            starget->hostdata = sata_port;
        } else {
            kfree(sata_port);
            return -ENOMEM;
        }
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
    struct ipr_sata_port *sata_port = starget->hostdata;

    if (sata_port) {
        starget->hostdata = NULL;
        ata_sas_port_destroy(sata_port->ap);
        kfree(sata_port);
    }
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
    struct ipr_resource_entry *res;

    list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
        if ((res->cfgte.res_addr.bus == sdev->channel) &&
            (res->cfgte.res_addr.target == sdev->id) &&
            (res->cfgte.res_addr.lun == sdev->lun))
            return res;
    }

    return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
    struct ipr_resource_entry *res;
    struct ipr_ioa_cfg *ioa_cfg;
    unsigned long lock_flags = 0;

    ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = (struct ipr_resource_entry *) sdev->hostdata;
    if (res) {
        if (res->sata_port)
            ata_port_disable(res->sata_port->ap);
        sdev->hostdata = NULL;
        res->sdev = NULL;
        res->sata_port = NULL;
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
    struct ipr_resource_entry *res;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    res = sdev->hostdata;
    if (res) {
        if (ipr_is_af_dasd_device(res))
            sdev->type = TYPE_RAID;
        if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
            sdev->scsi_level = 4;
            sdev->no_uld_attach = 1;
        }
        if (ipr_is_vset_device(res)) {
            sdev->timeout = IPR_VSET_RW_TIMEOUT;
            blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
        }
        if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
            sdev->allow_restart = 1;
        if (ipr_is_gata(res) && res->sata_port) {
            scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
            ata_sas_slave_configure(sdev, res->sata_port->ap);
        } else {
            scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
        }
    }
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
    struct ipr_sata_port *sata_port = NULL;
    int rc = -ENXIO;

    if (sdev->sdev_target)
        sata_port = sdev->sdev_target->hostdata;
    if (sata_port)
        rc = ata_sas_port_init(sata_port->ap);
    if (rc)
        ipr_slave_destroy(sdev);

    return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
    struct ipr_resource_entry *res;
    unsigned long lock_flags;
    int rc = -ENXIO;

    sdev->hostdata = NULL;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    res = ipr_find_sdev(sdev);
    if (res) {
        res->sdev = sdev;
        sdev->hostdata = res;
        if (!ipr_is_naca_model(res))
            res->needs_sync_complete = 1;
        rc = 0;
        if (ipr_is_gata(res)) {
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            return ipr_ata_slave_alloc(sdev);
        }
    }

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    return rc;
}
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
{
    struct ipr_ioa_cfg *ioa_cfg;
    int rc;

    ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

    dev_err(&ioa_cfg->pdev->dev,
        "Adapter being reset as a result of error recovery.\n");

    if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
        ioa_cfg->sdt_state = GET_DUMP;

    rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

    return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
{
    int rc;

    spin_lock_irq(cmd->device->host->host_lock);
    rc = __ipr_eh_host_reset(cmd);
    spin_unlock_irq(cmd->device->host->host_lock);

    return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
                struct ipr_resource_entry *res)
{
    struct ipr_cmnd *ipr_cmd;
    struct ipr_ioarcb *ioarcb;
    struct ipr_cmd_pkt *cmd_pkt;
    struct ipr_ioarcb_ata_regs *regs;
    u32 ioasc;

    ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
    ioarcb = &ipr_cmd->ioarcb;
    cmd_pkt = &ioarcb->cmd_pkt;
    regs = &ioarcb->add_data.u.regs;

    ioarcb->res_handle = res->cfgte.res_handle;
    cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
    cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
    if (ipr_is_gata(res)) {
        cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
        ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
        regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
    }

    ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
    ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
    list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
    if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
        memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
               sizeof(struct ipr_ioasa_gata));

    return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @ap:		SATA port to reset
 * @classes:	class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA port.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
{
    struct ipr_sata_port *sata_port = ap->private_data;
    struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
    struct ipr_resource_entry *res;
    unsigned long lock_flags = 0;
    int rc = -ENXIO;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    while (ioa_cfg->in_reset_reload) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    }

    res = sata_port->res;
    if (res) {
        rc = ipr_device_reset(ioa_cfg, res);
        switch (res->cfgte.proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
            *classes = ATA_DEV_ATA;
            break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
            *classes = ATA_DEV_ATAPI;
            break;
        default:
            *classes = ATA_DEV_UNKNOWN;
            break;
        }
    }

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return rc;
}
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
    struct ipr_cmnd *ipr_cmd;
    struct ipr_ioa_cfg *ioa_cfg;
    struct ipr_resource_entry *res;
    struct ata_port *ap;
    int rc = 0;

    ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
    res = scsi_cmd->device->hostdata;

    if (!res)
        return FAILED;

    /*
     * If we are currently going through reset/reload, return failed. This will force the
     * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
     * reset to complete
     */
    if (ioa_cfg->in_reset_reload)
        return FAILED;
    if (ioa_cfg->ioa_is_dead)
        return FAILED;

    list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
        if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
            if (ipr_cmd->scsi_cmd)
                ipr_cmd->done = ipr_scsi_eh_done;
            if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
                ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
                ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
            }
        }
    }

    res->resetting_device = 1;
    scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

    if (ipr_is_gata(res) && res->sata_port) {
        ap = res->sata_port->ap;
        spin_unlock_irq(scsi_cmd->device->host->host_lock);
        ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
        spin_lock_irq(scsi_cmd->device->host->host_lock);
    } else
        rc = ipr_device_reset(ioa_cfg, res);
    res->resetting_device = 0;

    return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
{
    int rc;

    spin_lock_irq(cmd->device->host->host_lock);
    rc = __ipr_eh_dev_reset(cmd);
    spin_unlock_irq(cmd->device->host->host_lock);

    return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
    struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
    struct ipr_resource_entry *res;

    list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
        if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
                sizeof(res->cfgte.res_handle))) {
            scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
            break;
        }
    }

    /*
     * If abort has not completed, indicate the reset has, else call the
     * abort's done function to wake the sleeping eh thread
     */
    if (ipr_cmd->sibling->sibling)
        ipr_cmd->sibling->sibling = NULL;
    else
        ipr_cmd->sibling->done(ipr_cmd->sibling);

    list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
    struct ipr_cmnd *reset_cmd;
    struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
    struct ipr_cmd_pkt *cmd_pkt;
    unsigned long lock_flags = 0;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return;
    }

    sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
    reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
    ipr_cmd->sibling = reset_cmd;
    reset_cmd->sibling = ipr_cmd;
    reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
    cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
    cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
    cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
    cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

    ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels the specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
    struct ipr_cmnd *ipr_cmd;
    struct ipr_ioa_cfg *ioa_cfg;
    struct ipr_resource_entry *res;
    struct ipr_cmd_pkt *cmd_pkt;
    u32 ioasc;
    int op_found = 0;

    ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
    res = scsi_cmd->device->hostdata;

    /* If we are currently going through reset/reload, return failed.
     * This will force the mid-layer to call ipr_eh_host_reset,
     * which will then go to sleep and wait for the reset to complete
     */
    if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
        return FAILED;
    if (!res || !ipr_is_gscsi(res))
        return FAILED;

    list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
        if (ipr_cmd->scsi_cmd == scsi_cmd) {
            ipr_cmd->done = ipr_scsi_eh_done;
            op_found = 1;
            break;
        }
    }

    if (!op_found)
        return SUCCESS;

    ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
    ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
    cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
    cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
    cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
    ipr_cmd->u.sdev = scsi_cmd->device;

    scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
            scsi_cmd->cmnd[0]);
    ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
    ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

    /*
     * If the abort task timed out and we sent a bus reset, we will get
     * one of the following responses to the abort
     */
    if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
        ioasc = 0;

    list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
    if (!ipr_is_naca_model(res))
        res->needs_sync_complete = 1;

    return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
{
    unsigned long flags;
    int rc;

    spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
    rc = ipr_cancel_op(scsi_cmd);
    spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

    return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
                          volatile u32 int_reg)
{
    irqreturn_t rc = IRQ_HANDLED;

    if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
        /* Mask the interrupt */
        writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear the interrupt */
        writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

        list_del(&ioa_cfg->reset_cmd->queue);
        del_timer(&ioa_cfg->reset_cmd->timer);
        ipr_reset_ioa_job(ioa_cfg->reset_cmd);
    } else {
        if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
            ioa_cfg->ioa_unit_checked = 1;
        else
            dev_err(&ioa_cfg->pdev->dev,
                "Permanent IOA failure. 0x%08X\n", int_reg);

        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
            ioa_cfg->sdt_state = GET_DUMP;

        ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
    }

    return rc;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
    struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
    unsigned long lock_flags = 0;
    volatile u32 int_reg, int_mask_reg;
    u32 ioasc;
    u16 cmd_index;
    struct ipr_cmnd *ipr_cmd;
    irqreturn_t rc = IRQ_NONE;

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    /* If interrupts are disabled, ignore the interrupt */
    if (!ioa_cfg->allow_interrupts) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return IRQ_NONE;
    }

    int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
    int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

    /* If an interrupt on the adapter did not occur, ignore it */
    if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return IRQ_NONE;
    }

    while (1) {
        ipr_cmd = NULL;

        while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
               ioa_cfg->toggle_bit) {

            cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
                     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

            if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
                ioa_cfg->errors_logged++;
                dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");

                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
                    ioa_cfg->sdt_state = GET_DUMP;

                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                return IRQ_HANDLED;
            }

            ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

            ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

            ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

            list_del(&ipr_cmd->queue);
            del_timer(&ipr_cmd->timer);
            ipr_cmd->done(ipr_cmd);

            rc = IRQ_HANDLED;

            if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
                ioa_cfg->hrrq_curr++;
            } else {
                ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
                ioa_cfg->toggle_bit ^= 1u;
            }
        }

        if (ipr_cmd != NULL) {
            /* Clear the PCI interrupt */
            writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
            int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
        } else
            break;
    }

    if (unlikely(rc == IRQ_NONE))
        rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    return rc;
}
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
               struct ipr_cmnd *ipr_cmd)
{
    int i;
    struct scatterlist *sglist;
    u32 length;
    u32 ioadl_flags = 0;
    struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
    struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
    struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

    length = scsi_cmd->request_bufflen;

    if (length == 0)
        return 0;

    if (scsi_cmd->use_sg) {
        ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
                         scsi_cmd->request_buffer,
                         scsi_cmd->use_sg,
                         scsi_cmd->sc_data_direction);

        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
            ioadl_flags = IPR_IOADL_FLAGS_WRITE;
            ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
            ioarcb->write_data_transfer_length = cpu_to_be32(length);
            ioarcb->write_ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
            ioadl_flags = IPR_IOADL_FLAGS_READ;
            ioarcb->read_data_transfer_length = cpu_to_be32(length);
            ioarcb->read_ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
        }

        sglist = scsi_cmd->request_buffer;

        for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
            ioadl[i].flags_and_data_len =
                cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
            ioadl[i].address =
                cpu_to_be32(sg_dma_address(&sglist[i]));
        }

        if (likely(ipr_cmd->dma_use_sg)) {
            ioadl[i-1].flags_and_data_len |=
                cpu_to_be32(IPR_IOADL_FLAGS_LAST);
            return 0;
        } else
            dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
    } else {
        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
            ioadl_flags = IPR_IOADL_FLAGS_WRITE;
            ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
            ioarcb->write_data_transfer_length = cpu_to_be32(length);
            ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
            ioadl_flags = IPR_IOADL_FLAGS_READ;
            ioarcb->read_data_transfer_length = cpu_to_be32(length);
            ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
        }

        ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
                             scsi_cmd->request_buffer, length,
                             scsi_cmd->sc_data_direction);

        if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
            ipr_cmd->dma_use_sg = 1;
            ioadl[0].flags_and_data_len =
                cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
            ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
            return 0;
        } else
            dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
    }

    return -1;
}
/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
    u8 tag[2];
    u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

    if (scsi_populate_tag_msg(scsi_cmd, tag)) {
        switch (tag[0]) {
        case MSG_SIMPLE_TAG:
            rc = IPR_FLAGS_LO_SIMPLE_TASK;
            break;
        case MSG_HEAD_TAG:
            rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
            break;
        case MSG_ORDERED_TAG:
            rc = IPR_FLAGS_LO_ORDERED_TASK;
            break;
        }
    }

    return rc;
}
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
    struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
    struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
    struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
    u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

    if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
        scsi_cmd->result |= (DID_ERROR << 16);
        scmd_printk(KERN_ERR, scsi_cmd,
                "Request Sense failed with IOASC: 0x%08X\n", ioasc);
    } else {
        memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
               SCSI_SENSE_BUFFERSIZE);
    }

    if (res && !ipr_is_naca_model(res))
        res->needs_sync_complete = 1;

    ipr_unmap_sglist(ioa_cfg, ipr_cmd);
    list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
    scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
    struct ipr_ioarcb *ioarcb;
    struct ipr_ioasa *ioasa;

    ioarcb = &ipr_cmd->ioarcb;
    ioasa = &ipr_cmd->ioasa;

    memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
    ioarcb->write_data_transfer_length = 0;
    ioarcb->read_data_transfer_length = 0;
    ioarcb->write_ioadl_len = 0;
    ioarcb->read_ioadl_len = 0;
    ioasa->residual_data_len = 0;
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
    struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
    u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

    if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
        ipr_erp_done(ipr_cmd);
        return;
    }

    ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

    cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
    cmd_pkt->cdb[0] = REQUEST_SENSE;
    cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
    cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
    cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
    cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

    ipr_cmd->ioadl[0].flags_and_data_len =
        cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
    ipr_cmd->ioadl[0].address =
        cpu_to_be32(ipr_cmd->sense_buffer_dma);

    ipr_cmd->ioarcb.read_ioadl_len =
        cpu_to_be32(sizeof(struct ipr_ioadl_desc));
    ipr_cmd->ioarcb.read_data_transfer_length =
        cpu_to_be32(SCSI_SENSE_BUFFERSIZE);

    ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
           IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
    struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
    struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
    struct ipr_cmd_pkt *cmd_pkt;

    res->in_erp = 1;

    ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

    if (!scsi_get_tag_type(scsi_cmd->device)) {
        ipr_erp_request_sense(ipr_cmd);
        return;
    }

    cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
    cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
    cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

    ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
           IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for ops that fail.
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
               struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
    int i;
    u16 data_len;
    u32 ioasc;
    struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
    __be32 *ioasa_data = (__be32 *)ioasa;
    int error_index;

    ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;

    if (0 == ioasc)
        return;

    if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
        return;

    error_index = ipr_get_error(ioasc);

    if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
        /* Don't log an error if the IOA already logged one */
        if (ioasa->ilid != 0)
            return;

        if (ipr_error_table[error_index].log_ioasa == 0)
            return;
    }

    ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

    if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
        data_len = sizeof(struct ipr_ioasa);
    else
        data_len = be16_to_cpu(ioasa->ret_stat_len);

    ipr_err("IOASA Dump:\n");

    for (i = 0; i < data_len / 4; i += 4) {
        ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
            be32_to_cpu(ioasa_data[i]),
            be32_to_cpu(ioasa_data[i+1]),
            be32_to_cpu(ioasa_data[i+2]),
            be32_to_cpu(ioasa_data[i+3]));
    }
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
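
/*
 * The two branches above build the two standard SPC sense formats:
 *
 *   0x72 (descriptor format) is used only for vset devices whose failing
 *   LBA needs more than 32 bits: bytes 8-11 form an information
 *   descriptor header (type 0x00, additional length 0x0A, VALID bit
 *   0x80) and bytes 12-19 carry the 64-bit failing LBA.
 *
 *   0x70 (fixed format) is used for everything else, with the sense key
 *   in byte 2, ASC/ASCQ in bytes 12-13, and, for unrecovered media
 *   errors, the 32-bit failing LBA in the information field (bytes 3-6)
 *   with the Valid bit OR'd into byte 0.
 */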

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
		   SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (ipr_is_gscsi(res))
		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
	else
		ipr_gen_sense(ipr_cmd);

	switch (ioasc & IPR_IOASC_IOASC_MASK) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
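
/*
 * The switch above maps adapter IOASCs onto mid-layer result codes:
 * transient conditions become DID_IMM_RETRY, missing devices become
 * DID_NO_CONNECT, and a check condition with no usable autosense kicks
 * off full ERP (cancel all, then request sense) via an early return.
 * On every other path the command block goes back to the free queue and
 * scsi_done() is called here.
 */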

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
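
/*
 * Fast-path note: once the IOARCB is built, queueing a command is a
 * single MMIO write of the IOARCB's bus address to the IOARRIN register;
 * no doorbell handshake is involved. The mb() above keeps the IOARCB
 * contents globally visible before the adapter is told to fetch it.
 */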

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res))
		return ata_scsi_ioctl(sdev, cmd, arg);

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char * ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->ops->port_disable(ap);
		goto out_unlock;
	}

	switch(res->cfgte.proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		ap->device[0].class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		ap->device[0].class = ATA_DEV_ATAPI;
		break;
	default:
		ap->device[0].class = ATA_DEV_UNKNOWN;
		ap->ops->port_disable(ap);
		break;
	}

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_tf_read - Read the current ATA taskfile for the ATA port
 * @ap:	ATA port
 * @tf:	destination ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;
	tf->ctl = g->alt_status;
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
	       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
					 res->cfgte.res_addr.target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	else
		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	int len = qc->nbytes + qc->pad_len;
	struct scatterlist *sg;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->write_data_transfer_length = cpu_to_be32(len);
		ioarcb->write_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	ata_for_each_sg(sg, qc) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));
		if (ata_sg_is_last(sg, qc))
			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
		else
			ioadl++;
	}
}
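
/*
 * IOADL element layout, as used above: each descriptor is two big-endian
 * 32-bit words, the flags OR'd with the byte count in the first word and
 * the DMA address in the second. The read/write flavor is chosen from
 * the qc's dma_dir, and IPR_IOADL_FLAGS_LAST marks the final element so
 * the adapter knows where the list ends.
 */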

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	regs = &ioarcb->add_data.u.regs;

	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;

	ipr_build_ata_ioadl(ipr_cmd, qc);
	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATA_PROT_ATAPI_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	mb();
	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);

	return 0;
}
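
/*
 * The taskfile protocol switch above only sets hint flags for the
 * adapter: DMA protocols get IPR_ATA_FLAG_XFER_TYPE_DMA, ATAPI protocols
 * get IPR_ATA_FLAG_PACKET_CMD, and plain PIO/non-data commands need no
 * extra flags. An unrecognized protocol is rejected up front rather
 * than being issued.
 */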

/**
 * ipr_ata_check_status - Return last ATA status
 * @ap:	ATA port
 *
 * Return value:
 *	ATA status
 **/
static u8 ipr_ata_check_status(struct ata_port *ap)
{
	struct ipr_sata_port *sata_port = ap->private_data;
	return sata_port->ioasa.status;
}

/**
 * ipr_ata_check_altstatus - Return last ATA altstatus
 * @ap:	ATA port
 *
 * Return value:
 *	ATA altstatus
 **/
static u8 ipr_ata_check_altstatus(struct ata_port *ap)
{
	struct ipr_sata_port *sata_port = ap->private_data;
	return sata_port->ioasa.alt_status;
}

static struct ata_port_operations ipr_sata_ops = {
	.port_disable = ata_port_disable,
	.check_status = ipr_ata_check_status,
	.check_altstatus = ipr_ata_check_altstatus,
	.dev_select = ata_noop_dev_select,
	.phy_reset = ipr_ata_phy_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.tf_read = ipr_tf_read,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask	= 0x10, /* pio4 */
	.mwdma_mask = 0x07, /* mwdma2 */
	.udma_mask	= 0x7f, /* udma0-6 */
	.port_ops	= &ipr_sata_ops
};

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	u8 rev_id;
	int i;

	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			if (rev_id < 4) {
				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
					if (__is_processor(ipr_blocked_processors[i]))
						return 1;
				}
			}
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:		vendor product id struct
 *
 * Return value:
 *	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}

/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_setup_write_cache - Disable write cache if needed
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the adapter's write cache to the desired setting
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	if (ioa_cfg->cache_state != CACHE_DISABLED)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}

/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 *	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}

/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 *	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}

/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm, u32 dma_addr,
				  u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
}

/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_setup_write_cache;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
}

/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_setup_write_cache;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;
	int found, i;
	LIST_HEAD(old_res);

	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res);
			res->add_to_ml = 1;
		}

		if (found)
			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	}

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	return IPR_RC_JOB_CONTINUE;
}
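
/*
 * Reconciliation strategy above: the entire used-resource list is parked
 * on old_res, then every entry in the freshly fetched config table is
 * either matched back by resource address or given a free resource entry
 * (flagged add_to_ml). Whatever remains on old_res no longer exists on
 * the adapter: entries with an attached sdev are flagged del_from_ml for
 * the worker thread, the rest go straight back to the free list.
 */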

/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;

	if (!ipr_inquiry_page_supported(page0, 1))
		ioa_cfg->cache_state = CACHE_NONE;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
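
/*
 * CDB layout for Identify Host RRQ, per the code above: bytes 2-5 carry
 * the 32-bit host RRQ DMA address MSB-first and bytes 7-8 carry the
 * queue size in bytes (one u32 slot per command block), so the values
 * are explicitly byte-packed rather than relying on host endianness.
 */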

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	return rc;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
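/*
 * For reference, the shutdown path in this file chains its job steps
 * as follows (each step installs its successor in ->job_step):
 *
 *   ipr_reset_shutdown_ioa
 *     -> ipr_reset_ucode_download (falls through if no microcode staged)
 *       -> ipr_reset_ucode_download_done
 *         -> ipr_reset_alert
 *           -> ipr_reset_wait_to_start_bist or ipr_reset_start_bist
 */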
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
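/*
 * Job-step return convention: IPR_RC_JOB_CONTINUE means the step
 * finished synchronously and the loop above runs the next step
 * immediately; IPR_RC_JOB_RETURN means the step queued adapter work or
 * armed a timer (typically via ipr_do_req(..., ipr_reset_ioa_job, ...)
 * or ipr_reset_start_timer()), so the job resumes from that completion
 * path, which re-enters ipr_reset_ioa_job.
 */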
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
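/*
 * The unlock/relock around scsi_unblock_requests() above keeps us from
 * re-entering midlayer queue processing with host_lock held: unblocking
 * can immediately restart queued requests, which would take host_lock
 * again on the queuecommand path.
 */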
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
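/*
 * The command parked on pending_q above has ->done pointed at
 * ipr_reset_ioa_job, so whenever it is eventually completed or failed
 * the job router runs again; if a newer reset job owns reset_cmd by
 * then, the router simply returns this command to the free queue.
 */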
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
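/*
 * Recovery flow as wired up here: on pci_channel_io_frozen we park the
 * reset job via ipr_reset_freeze and ask for a slot reset; once the
 * slot has been reset, ipr_pci_slot_reset restarts the job at
 * ipr_reset_restore_cfg_space. A permanent failure instead forces the
 * retry counter to its limit so the adapter is taken offline.
 */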
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
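/*
 * Note: the command index is stored in host_response_handle shifted
 * left by two, which leaves the low two bits of each host RRQ entry
 * for the adapter's response/toggle flags; the interrupt handler masks
 * those bits off and shifts back down to recover the index.
 */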
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0)
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
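/*
 * Unwind note: the error labels above run in reverse allocation order,
 * and at out_free_hostrcb_dma the loop counter i holds the index of
 * the first hostrcb that failed to allocate, so while (i-- > 0) frees
 * exactly the ones that succeeded.
 */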
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;

	return NULL;
}
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
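/*
 * Probe is split in two: the routine above only claims resources and
 * leaves the adapter quiesced with interrupts masked;
 * ipr_probe_ioa_part2(), called from ipr_probe(), performs the
 * reset/bring-up that makes the adapter accept commands.
 */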
/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
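/*
 * Failure handling above tears down in reverse order of setup: a later
 * failure removes the sysfs files and the SCSI host that earlier steps
 * registered before falling back to __ipr_remove(), so a partly probed
 * adapter never stays half-registered.
 */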
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
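/*
 * Each table entry stashes a pointer to its ipr_chip_cfg_t in
 * driver_data; ipr_get_chip_cfg() uses that pointer directly and only
 * falls back to scanning ipr_chip[] by vendor/device when driver_data
 * is zero.
 */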
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);