[SCSI] ipr: Support new device queueing model
drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	}
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
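
/*
 * Illustrative sketch (an assumption based on the ipr_chip_t layout in
 * ipr.h, not code from this excerpt): probe code can resolve the register
 * layout for a discovered PCI function by walking ipr_chip[], roughly:
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			return ipr_chip[i].cfg;
 */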

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
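
/*
 * Example invocation of the module parameters above (illustrative
 * values only):
 *
 *	modprobe ipr max_speed=2 log_level=2 enable_cache=1 auto_create=0
 */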

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
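
/*
 * Note (an assumption drawn from the call sites visible in this file):
 * the free queue is taken to be non-empty here. Callers such as
 * ipr_send_hcam() only invoke this in contexts where a command block
 * is expected to be available, so no empty-list check is performed.
 */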

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
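	/* The read below flushes the posted mask/clear writes to the
	   adapter before this function returns */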
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
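
/*
 * Illustrative usage of the blocking helper above (not a call site from
 * this excerpt; IPR_INTERNAL_TIMEOUT stands in for whatever timeout a
 * real caller would use). The host_lock must be held on entry, since
 * the helper drops and re-acquires it around the wait:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	... fill in the rest of the IOARCB ...
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 */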

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
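
/*
 * Example of the resulting log format (illustrative values only): each
 * line prints a byte offset followed by four big-endian words, e.g.
 *
 *	00000000: 01170900 00000000 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */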

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:	ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now dead, so report the failure to the caller */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
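
/*
 * Worked example (illustrative): an enclosure limited to 160 MB/s on a
 * wide (16-bit) bus gives (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in
 * the 100KHz units returned here, the U160 clock rate for a wide bus.
 */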

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
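
/*
 * The loop above doubles the delay on every pass (delay += delay), so
 * the cumulative busy-wait stays bounded by roughly 2 * max_delay
 * microseconds before the -EIO timeout is returned.
 */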

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:	ioa config struct
 * @start_addr:	adapter address to dump
 * @dest:	destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:	ioa config struct
 * @pci_address:	adapter address
 * @length:	length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
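
/*
 * Illustrative packing of fw_version above (made-up VPD values): with
 * major_release 0x02, card_type 0x51 and minor_release { 0x09, 0x0A },
 * fw_version becomes 0x0251090A.
 */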

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
1891 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1892 * @ioa_cfg: ioa config struct
1893 * @dump: dump struct
1895 * Return value:
1896 * nothing
1898 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1900 unsigned long start_addr, sdt_word;
1901 unsigned long lock_flags = 0;
1902 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1903 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1904 u32 num_entries, start_off, end_off;
1905 u32 bytes_to_copy, bytes_copied, rc;
1906 struct ipr_sdt *sdt;
1907 int i;
1909 ENTER;
1911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1913 if (ioa_cfg->sdt_state != GET_DUMP) {
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1915 return;
1918 start_addr = readl(ioa_cfg->ioa_mailbox);
1920 if (!ipr_sdt_is_fmt2(start_addr)) {
1921 dev_err(&ioa_cfg->pdev->dev,
1922 "Invalid dump table format: %lx\n", start_addr);
1923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1924 return;
1927 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1929 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1931 /* Initialize the overall dump header */
1932 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1933 driver_dump->hdr.num_entries = 1;
1934 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1935 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1936 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1937 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1939 ipr_dump_version_data(ioa_cfg, driver_dump);
1940 ipr_dump_location_data(ioa_cfg, driver_dump);
1941 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1942 ipr_dump_trace_data(ioa_cfg, driver_dump);
1944 /* Update dump_header */
1945 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1947 /* IOA Dump entry */
1948 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1949 ioa_dump->format = IPR_SDT_FMT2;
1950 ioa_dump->hdr.len = 0;
1951 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1952 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1954 /* The first entries in the sdt are actually a list of dump addresses and
1955 lengths used to gather the real dump data. sdt points to the
1956 IOA-generated dump table. Dump data will be extracted based
1957 on the entries in this table */
1958 sdt = &ioa_dump->sdt;
1960 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1961 sizeof(struct ipr_sdt) / sizeof(__be32));
1963 /* Bail out unless the read succeeded and the Smart Dump Table is ready to use */
1964 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1965 dev_err(&ioa_cfg->pdev->dev,
1966 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1967 rc, be32_to_cpu(sdt->hdr.state));
1968 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1969 ioa_cfg->sdt_state = DUMP_OBTAINED;
1970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1971 return;
1974 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
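/* Never walk past the number of SDT entries the driver's table can hold */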
1976 if (num_entries > IPR_NUM_SDT_ENTRIES)
1977 num_entries = IPR_NUM_SDT_ENTRIES;
1979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1981 for (i = 0; i < num_entries; i++) {
1982 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1984 break;
1987 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1988 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1989 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1990 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1992 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1993 bytes_to_copy = end_off - start_off;
1994 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1995 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1996 continue;
1999 /* Copy data from adapter to driver buffers */
2000 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2001 bytes_to_copy);
2003 ioa_dump->hdr.len += bytes_copied;
2005 if (bytes_copied != bytes_to_copy) {
2006 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2007 break;
2013 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2015 /* Update dump_header */
2016 driver_dump->hdr.len += ioa_dump->hdr.len;
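/* Make sure the dump data and header length updates are visible
 * before sdt_state below flags the dump as obtained */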
2017 wmb();
2018 ioa_cfg->sdt_state = DUMP_OBTAINED;
2019 LEAVE;
2022 #else
2023 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2024 #endif
2027 * ipr_release_dump - Free adapter dump memory
2028 * @kref: kref struct
2030 * Return value:
2031 * nothing
2033 static void ipr_release_dump(struct kref *kref)
2035 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2036 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2037 unsigned long lock_flags = 0;
2038 int i;
2040 ENTER;
2041 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2042 ioa_cfg->dump = NULL;
2043 ioa_cfg->sdt_state = INACTIVE;
2044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2046 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2047 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2049 kfree(dump);
2050 LEAVE;
2054 * ipr_worker_thread - Worker thread
2055 * @data: ioa config struct
2057 * Called at task level from a work thread. This function takes care
2058 * of adding and removing devices from the mid-layer as configuration
2059 * changes are detected by the adapter.
2061 * Return value:
2062 * nothing
2064 static void ipr_worker_thread(void *data)
2066 unsigned long lock_flags;
2067 struct ipr_resource_entry *res;
2068 struct scsi_device *sdev;
2069 struct ipr_dump *dump;
2070 struct ipr_ioa_cfg *ioa_cfg = data;
2071 u8 bus, target, lun;
2072 int did_work;
2074 ENTER;
2075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2077 if (ioa_cfg->sdt_state == GET_DUMP) {
2078 dump = ioa_cfg->dump;
2079 if (!dump) {
2080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2081 return;
2083 kref_get(&dump->kref);
2084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2085 ipr_get_ioa_dump(ioa_cfg, dump);
2086 kref_put(&dump->kref, ipr_release_dump);
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2090 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2092 return;
2095 restart:
2096 do {
2097 did_work = 0;
2098 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2100 return;
2103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2104 if (res->del_from_ml && res->sdev) {
2105 did_work = 1;
2106 sdev = res->sdev;
2107 if (!scsi_device_get(sdev)) {
2108 res->sdev = NULL;
2109 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 scsi_remove_device(sdev);
2112 scsi_device_put(sdev);
2113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2115 break;
2118 } while (did_work);
2120 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2121 if (res->add_to_ml) {
2122 bus = res->cfgte.res_addr.bus;
2123 target = res->cfgte.res_addr.target;
2124 lun = res->cfgte.res_addr.lun;
2125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2126 scsi_add_device(ioa_cfg->host, bus, target, lun);
2127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2128 goto restart;
2132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2133 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
2134 LEAVE;
2137 #ifdef CONFIG_SCSI_IPR_TRACE
2139 * ipr_read_trace - Dump the adapter trace
2140 * @kobj: kobject struct
2141 * @buf: buffer
2142 * @off: offset
2143 * @count: buffer size
2145 * Return value:
2146 * number of bytes printed to buffer
2148 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2149 loff_t off, size_t count)
2151 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2152 struct Scsi_Host *shost = class_to_shost(cdev);
2153 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2154 unsigned long lock_flags = 0;
2155 int size = IPR_TRACE_SIZE;
2156 char *src = (char *)ioa_cfg->trace;
2158 if (off > size)
2159 return 0;
2160 if (off + count > size) {
2161 size -= off;
2162 count = size;
2165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2166 memcpy(buf, &src[off], count);
2167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2168 return count;
2171 static struct bin_attribute ipr_trace_attr = {
2172 .attr = {
2173 .name = "trace",
2174 .mode = S_IRUGO,
2176 .size = 0,
2177 .read = ipr_read_trace,
2179 #endif
2181 static const struct {
2182 enum ipr_cache_state state;
2183 char *name;
2184 } cache_state [] = {
2185 { CACHE_NONE, "none" },
2186 { CACHE_DISABLED, "disabled" },
2187 { CACHE_ENABLED, "enabled" }
2191 * ipr_show_write_caching - Show the write caching attribute
2192 * @class_dev: class device struct
2193 * @buf: buffer
2195 * Return value:
2196 * number of bytes printed to buffer
2198 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2200 struct Scsi_Host *shost = class_to_shost(class_dev);
2201 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2202 unsigned long lock_flags = 0;
2203 int i, len = 0;
2205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2206 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2207 if (cache_state[i].state == ioa_cfg->cache_state) {
2208 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2209 break;
2212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2213 return len;
2218 * ipr_store_write_caching - Enable/disable adapter write cache
2219 * @class_dev: class_device struct
2220 * @buf: buffer
2221 * @count: buffer size
2223 * This function will enable/disable adapter write cache.
2225 * Return value:
2226 * count on success / other on failure
2228 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2229 const char *buf, size_t count)
2231 struct Scsi_Host *shost = class_to_shost(class_dev);
2232 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2233 unsigned long lock_flags = 0;
2234 enum ipr_cache_state new_state = CACHE_INVALID;
2235 int i;
2237 if (!capable(CAP_SYS_ADMIN))
2238 return -EACCES;
2239 if (ioa_cfg->cache_state == CACHE_NONE)
2240 return -EINVAL;
2242 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2243 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2244 new_state = cache_state[i].state;
2245 break;
2249 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2250 return -EINVAL;
2252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2253 if (ioa_cfg->cache_state == new_state) {
2254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2255 return count;
2258 ioa_cfg->cache_state = new_state;
2259 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2260 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2261 if (!ioa_cfg->in_reset_reload)
2262 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2264 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2266 return count;
2269 static struct class_device_attribute ipr_ioa_cache_attr = {
2270 .attr = {
2271 .name = "write_cache",
2272 .mode = S_IRUGO | S_IWUSR,
2274 .show = ipr_show_write_caching,
2275 .store = ipr_store_write_caching
2279 * ipr_show_fw_version - Show the firmware version
2280 * @class_dev: class device struct
2281 * @buf: buffer
2283 * Return value:
2284 * number of bytes printed to buffer
2286 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2288 struct Scsi_Host *shost = class_to_shost(class_dev);
2289 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2290 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2291 unsigned long lock_flags = 0;
2292 int len;
2294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2295 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2296 ucode_vpd->major_release, ucode_vpd->card_type,
2297 ucode_vpd->minor_release[0],
2298 ucode_vpd->minor_release[1]);
2299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2300 return len;
2303 static struct class_device_attribute ipr_fw_version_attr = {
2304 .attr = {
2305 .name = "fw_version",
2306 .mode = S_IRUGO,
2308 .show = ipr_show_fw_version,
2312 * ipr_show_log_level - Show the adapter's error logging level
2313 * @class_dev: class device struct
2314 * @buf: buffer
2316 * Return value:
2317 * number of bytes printed to buffer
2319 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2324 int len;
2326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329 return len;
2333 * ipr_store_log_level - Change the adapter's error logging level
2334 * @class_dev: class device struct
2335 * @buf: buffer
2337 * Return value:
2338 * number of bytes consumed from the buffer
2340 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2341 const char *buf, size_t count)
2343 struct Scsi_Host *shost = class_to_shost(class_dev);
2344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2345 unsigned long lock_flags = 0;
2347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2350 return strlen(buf);
2353 static struct class_device_attribute ipr_log_level_attr = {
2354 .attr = {
2355 .name = "log_level",
2356 .mode = S_IRUGO | S_IWUSR,
2358 .show = ipr_show_log_level,
2359 .store = ipr_store_log_level
2363 * ipr_store_diagnostics - IOA Diagnostics interface
2364 * @class_dev: class_device struct
2365 * @buf: buffer
2366 * @count: buffer size
2368 * This function will reset the adapter and wait a reasonable
2369 * amount of time for any errors that the adapter might log.
2371 * Return value:
2372 * count on success / other on failure
2374 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2375 const char *buf, size_t count)
2377 struct Scsi_Host *shost = class_to_shost(class_dev);
2378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2379 unsigned long lock_flags = 0;
2380 int rc = count;
2382 if (!capable(CAP_SYS_ADMIN))
2383 return -EACCES;
2385 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2387 ioa_cfg->errors_logged = 0;
2388 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2390 if (ioa_cfg->in_reset_reload) {
2391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2392 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2394 /* Wait for a second for any errors to be logged */
2395 msleep(1000);
2396 } else {
2397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2398 return -EIO;
2401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2402 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2403 rc = -EIO;
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2406 return rc;
2409 static struct class_device_attribute ipr_diagnostics_attr = {
2410 .attr = {
2411 .name = "run_diagnostics",
2412 .mode = S_IWUSR,
2414 .store = ipr_store_diagnostics
2418 * ipr_show_adapter_state - Show the adapter's state
2419 * @class_dev: class device struct
2420 * @buf: buffer
2422 * Return value:
2423 * number of bytes printed to buffer
2425 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2427 struct Scsi_Host *shost = class_to_shost(class_dev);
2428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2429 unsigned long lock_flags = 0;
2430 int len;
2432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2433 if (ioa_cfg->ioa_is_dead)
2434 len = snprintf(buf, PAGE_SIZE, "offline\n");
2435 else
2436 len = snprintf(buf, PAGE_SIZE, "online\n");
2437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2438 return len;
2442 * ipr_store_adapter_state - Change adapter state
2443 * @class_dev: class_device struct
2444 * @buf: buffer
2445 * @count: buffer size
2447 * This function will change the adapter's state.
2449 * Return value:
2450 * count on success / other on failure
2452 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2453 const char *buf, size_t count)
2455 struct Scsi_Host *shost = class_to_shost(class_dev);
2456 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2457 unsigned long lock_flags;
2458 int result = count;
2460 if (!capable(CAP_SYS_ADMIN))
2461 return -EACCES;
2463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2464 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2465 ioa_cfg->ioa_is_dead = 0;
2466 ioa_cfg->reset_retries = 0;
2467 ioa_cfg->in_ioa_bringdown = 0;
2468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2473 return result;
2476 static struct class_device_attribute ipr_ioa_state_attr = {
2477 .attr = {
2478 .name = "state",
2479 .mode = S_IRUGO | S_IWUSR,
2481 .show = ipr_show_adapter_state,
2482 .store = ipr_store_adapter_state
2486 * ipr_store_reset_adapter - Reset the adapter
2487 * @class_dev: class_device struct
2488 * @buf: buffer
2489 * @count: buffer size
2491 * This function will reset the adapter.
2493 * Return value:
2494 * count on success / other on failure
2496 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2497 const char *buf, size_t count)
2499 struct Scsi_Host *shost = class_to_shost(class_dev);
2500 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2501 unsigned long lock_flags;
2502 int result = count;
2504 if (!capable(CAP_SYS_ADMIN))
2505 return -EACCES;
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508 if (!ioa_cfg->in_reset_reload)
2509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2513 return result;
2516 static struct class_device_attribute ipr_ioa_reset_attr = {
2517 .attr = {
2518 .name = "reset_host",
2519 .mode = S_IWUSR,
2521 .store = ipr_store_reset_adapter
2525 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2526 * @buf_len: buffer length
2528 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2529 * list to use for microcode download
2531 * Return value:
2532 * pointer to sglist / NULL on failure
2534 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2536 int sg_size, order, bsize_elem, num_elem, i, j;
2537 struct ipr_sglist *sglist;
2538 struct scatterlist *scatterlist;
2539 struct page *page;
2541 /* Get the minimum size per scatter/gather element */
2542 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2544 /* Get the actual size per element */
2545 order = get_order(sg_size);
2547 /* Determine the actual number of bytes per element */
2548 bsize_elem = PAGE_SIZE * (1 << order);
2550 /* Determine the actual number of sg entries needed */
2551 if (buf_len % bsize_elem)
2552 num_elem = (buf_len / bsize_elem) + 1;
2553 else
2554 num_elem = buf_len / bsize_elem;
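/* Illustrative sizing (assuming 4K pages and an IPR_MAX_SGLIST of 64):
 * a 1MB image yields sg_size of ~16.6KB, so order 3 (32KB chunks),
 * bsize_elem of 32KB, and num_elem of 32 sg entries */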
2556 /* Allocate a scatter/gather list for the DMA */
2557 sglist = kzalloc(sizeof(struct ipr_sglist) +
2558 (sizeof(struct scatterlist) * (num_elem - 1)),
2559 GFP_KERNEL);
2561 if (sglist == NULL) {
2562 ipr_trace;
2563 return NULL;
2566 scatterlist = sglist->scatterlist;
2568 sglist->order = order;
2569 sglist->num_sg = num_elem;
2571 /* Allocate a bunch of sg elements */
2572 for (i = 0; i < num_elem; i++) {
2573 page = alloc_pages(GFP_KERNEL, order);
2574 if (!page) {
2575 ipr_trace;
2577 /* Free up what we already allocated */
2578 for (j = i - 1; j >= 0; j--)
2579 __free_pages(scatterlist[j].page, order);
2580 kfree(sglist);
2581 return NULL;
2584 scatterlist[i].page = page;
2587 return sglist;
2591 * ipr_free_ucode_buffer - Frees a microcode download buffer
2592 * @sglist: scatter/gather list pointer
2594 * Free a DMA'able ucode download buffer previously allocated with
2595 * ipr_alloc_ucode_buffer
2597 * Return value:
2598 * nothing
2600 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2602 int i;
2604 for (i = 0; i < sglist->num_sg; i++)
2605 __free_pages(sglist->scatterlist[i].page, sglist->order);
2607 kfree(sglist);
2611 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2612 * @sglist: scatter/gather list pointer
2613 * @buffer: buffer pointer
2614 * @len: buffer length
2616 * Copy a microcode image from a user buffer into a buffer allocated by
2617 * ipr_alloc_ucode_buffer
2619 * Return value:
2620 * 0 on success / other on failure
2622 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2623 u8 *buffer, u32 len)
2625 int bsize_elem, i, result = 0;
2626 struct scatterlist *scatterlist;
2627 void *kaddr;
2629 /* Determine the actual number of bytes per element */
2630 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2632 scatterlist = sglist->scatterlist;
2634 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2635 kaddr = kmap(scatterlist[i].page);
2636 memcpy(kaddr, buffer, bsize_elem);
2637 kunmap(scatterlist[i].page);
2639 scatterlist[i].length = bsize_elem;
2641 if (result != 0) {
2642 ipr_trace;
2643 return result;
2647 if (len % bsize_elem) {
2648 kaddr = kmap(scatterlist[i].page);
2649 memcpy(kaddr, buffer, len % bsize_elem);
2650 kunmap(scatterlist[i].page);
2652 scatterlist[i].length = len % bsize_elem;
2655 sglist->buffer_len = len;
2656 return result;
2660 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2661 * @ipr_cmd: ipr command struct
2662 * @sglist: scatter/gather list
2664 * Builds a microcode download IOA data list (IOADL).
2667 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2668 struct ipr_sglist *sglist)
2670 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2671 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2672 struct scatterlist *scatterlist = sglist->scatterlist;
2673 int i;
2675 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2676 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2677 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2678 ioarcb->write_ioadl_len =
2679 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2681 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2682 ioadl[i].flags_and_data_len =
2683 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2684 ioadl[i].address =
2685 cpu_to_be32(sg_dma_address(&scatterlist[i]));
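/* Flag the final descriptor so the IOA knows where the IOADL ends */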
2688 ioadl[i-1].flags_and_data_len |=
2689 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2693 * ipr_update_ioa_ucode - Update IOA's microcode
2694 * @ioa_cfg: ioa config struct
2695 * @sglist: scatter/gather list
2697 * Initiate an adapter reset to update the IOA's microcode
2699 * Return value:
2700 * 0 on success / -EIO on failure
2702 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2703 struct ipr_sglist *sglist)
2705 unsigned long lock_flags;
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709 if (ioa_cfg->ucode_sglist) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Microcode download already in progress\n");
2713 return -EIO;
2716 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2717 sglist->num_sg, DMA_TO_DEVICE);
2719 if (!sglist->num_dma_sg) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 dev_err(&ioa_cfg->pdev->dev,
2722 "Failed to map microcode download buffer!\n");
2723 return -EIO;
2726 ioa_cfg->ucode_sglist = sglist;
2727 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2731 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2732 ioa_cfg->ucode_sglist = NULL;
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734 return 0;
2738 * ipr_store_update_fw - Update the firmware on the adapter
2739 * @class_dev: class_device struct
2740 * @buf: buffer
2741 * @count: buffer size
2743 * This function will update the firmware on the adapter.
2745 * Return value:
2746 * count on success / other on failure
2748 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2749 const char *buf, size_t count)
2751 struct Scsi_Host *shost = class_to_shost(class_dev);
2752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2753 struct ipr_ucode_image_header *image_hdr;
2754 const struct firmware *fw_entry;
2755 struct ipr_sglist *sglist;
2756 char fname[100];
2757 char *src;
2758 int len, result, dnld_size;
2760 if (!capable(CAP_SYS_ADMIN))
2761 return -EACCES;
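/* Copy in the firmware file name from sysfs, dropping the trailing newline */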
2763 len = snprintf(fname, sizeof(fname), "%s", buf);
2764 if (len > 0 && len < sizeof(fname) && fname[len - 1] == '\n')
fname[len - 1] = '\0';
2766 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2767 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2768 return -EIO;
2771 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2773 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2774 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2775 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2776 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2777 release_firmware(fw_entry);
2778 return -EINVAL;
2781 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2782 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2783 sglist = ipr_alloc_ucode_buffer(dnld_size);
2785 if (!sglist) {
2786 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2787 release_firmware(fw_entry);
2788 return -ENOMEM;
2791 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2793 if (result) {
2794 dev_err(&ioa_cfg->pdev->dev,
2795 "Microcode buffer copy to DMA buffer failed\n");
2796 goto out;
2799 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2801 if (!result)
2802 result = count;
2803 out:
2804 ipr_free_ucode_buffer(sglist);
2805 release_firmware(fw_entry);
2806 return result;
2809 static struct class_device_attribute ipr_update_fw_attr = {
2810 .attr = {
2811 .name = "update_fw",
2812 .mode = S_IWUSR,
2814 .store = ipr_store_update_fw
2817 static struct class_device_attribute *ipr_ioa_attrs[] = {
2818 &ipr_fw_version_attr,
2819 &ipr_log_level_attr,
2820 &ipr_diagnostics_attr,
2821 &ipr_ioa_state_attr,
2822 &ipr_ioa_reset_attr,
2823 &ipr_update_fw_attr,
2824 &ipr_ioa_cache_attr,
2825 NULL,
2828 #ifdef CONFIG_SCSI_IPR_DUMP
2830 * ipr_read_dump - Dump the adapter
2831 * @kobj: kobject struct
2832 * @buf: buffer
2833 * @off: offset
2834 * @count: buffer size
2836 * Return value:
2837 * number of bytes printed to buffer
2839 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2840 loff_t off, size_t count)
2842 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2843 struct Scsi_Host *shost = class_to_shost(cdev);
2844 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2845 struct ipr_dump *dump;
2846 unsigned long lock_flags = 0;
2847 char *src;
2848 int len;
2849 size_t rc = count;
2851 if (!capable(CAP_SYS_ADMIN))
2852 return -EACCES;
2854 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2855 dump = ioa_cfg->dump;
2857 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2859 return 0;
2861 kref_get(&dump->kref);
2862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2864 if (off > dump->driver_dump.hdr.len) {
2865 kref_put(&dump->kref, ipr_release_dump);
2866 return 0;
2869 if (off + count > dump->driver_dump.hdr.len) {
2870 count = dump->driver_dump.hdr.len - off;
2871 rc = count;
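/* Copy out in up to three stages: the driver dump header area, the
 * IOA dump entry header, then the page list of IOA dump data */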
2874 if (count && off < sizeof(dump->driver_dump)) {
2875 if (off + count > sizeof(dump->driver_dump))
2876 len = sizeof(dump->driver_dump) - off;
2877 else
2878 len = count;
2879 src = (u8 *)&dump->driver_dump + off;
2880 memcpy(buf, src, len);
2881 buf += len;
2882 off += len;
2883 count -= len;
2886 off -= sizeof(dump->driver_dump);
2888 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2889 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2890 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2891 else
2892 len = count;
2893 src = (u8 *)&dump->ioa_dump + off;
2894 memcpy(buf, src, len);
2895 buf += len;
2896 off += len;
2897 count -= len;
2900 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2902 while (count) {
2903 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2904 len = PAGE_ALIGN(off + 1) - off;
2905 else
2906 len = count;
2907 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2908 src += off & ~PAGE_MASK;
2909 memcpy(buf, src, len);
2910 buf += len;
2911 off += len;
2912 count -= len;
2915 kref_put(&dump->kref, ipr_release_dump);
2916 return rc;
2920 * ipr_alloc_dump - Prepare for adapter dump
2921 * @ioa_cfg: ioa config struct
2923 * Return value:
2924 * 0 on success / other on failure
2926 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2928 struct ipr_dump *dump;
2929 unsigned long lock_flags = 0;
2931 ENTER;
2932 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2934 if (!dump) {
2935 ipr_err("Dump memory allocation failed\n");
2936 return -ENOMEM;
2939 kref_init(&dump->kref);
2940 dump->ioa_cfg = ioa_cfg;
2942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2944 if (INACTIVE != ioa_cfg->sdt_state) {
2945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2946 kfree(dump);
2947 return 0;
2950 ioa_cfg->dump = dump;
2951 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2952 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2953 ioa_cfg->dump_taken = 1;
2954 schedule_work(&ioa_cfg->work_q);
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2958 LEAVE;
2959 return 0;
2963 * ipr_free_dump - Free adapter dump memory
2964 * @ioa_cfg: ioa config struct
2966 * Return value:
2967 * 0 on success / other on failure
2969 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2971 struct ipr_dump *dump;
2972 unsigned long lock_flags = 0;
2974 ENTER;
2976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2977 dump = ioa_cfg->dump;
2978 if (!dump) {
2979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2980 return 0;
2983 ioa_cfg->dump = NULL;
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2986 kref_put(&dump->kref, ipr_release_dump);
2988 LEAVE;
2989 return 0;
2993 * ipr_write_dump - Setup dump state of adapter
2994 * @kobj: kobject struct
2995 * @buf: buffer
2996 * @off: offset
2997 * @count: buffer size
2999 * Return value:
3000 * count on success / other on failure
3002 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3003 loff_t off, size_t count)
3005 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3006 struct Scsi_Host *shost = class_to_shost(cdev);
3007 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3008 int rc;
3010 if (!capable(CAP_SYS_ADMIN))
3011 return -EACCES;
3013 if (buf[0] == '1')
3014 rc = ipr_alloc_dump(ioa_cfg);
3015 else if (buf[0] == '0')
3016 rc = ipr_free_dump(ioa_cfg);
3017 else
3018 return -EINVAL;
3020 if (rc)
3021 return rc;
3022 else
3023 return count;
3026 static struct bin_attribute ipr_dump_attr = {
3027 .attr = {
3028 .name = "dump",
3029 .mode = S_IRUSR | S_IWUSR,
3031 .size = 0,
3032 .read = ipr_read_dump,
3033 .write = ipr_write_dump
3035 #else
3036 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3037 #endif
3040 * ipr_change_queue_depth - Change the device's queue depth
3041 * @sdev: scsi device struct
3042 * @qdepth: depth to set
3044 * Return value:
3045 * actual depth set
3047 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3049 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3050 return sdev->queue_depth;
3054 * ipr_change_queue_type - Change the device's queue type
3055 * @sdev: scsi device struct
3056 * @tag_type: type of tags to use
3058 * Return value:
3059 * actual queue type set
3061 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3063 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3064 struct ipr_resource_entry *res;
3065 unsigned long lock_flags = 0;
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068 res = (struct ipr_resource_entry *)sdev->hostdata;
3070 if (res) {
3071 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3073 * We don't bother quiescing the device here since the
3074 * adapter firmware does it for us.
3076 scsi_set_tag_type(sdev, tag_type);
3078 if (tag_type)
3079 scsi_activate_tcq(sdev, sdev->queue_depth);
3080 else
3081 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3082 } else
3083 tag_type = 0;
3084 } else
3085 tag_type = 0;
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3088 return tag_type;
3092 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3093 * @dev: device struct
3094 * @buf: buffer
3096 * Return value:
3097 * number of bytes printed to buffer
3099 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3101 struct scsi_device *sdev = to_scsi_device(dev);
3102 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3103 struct ipr_resource_entry *res;
3104 unsigned long lock_flags = 0;
3105 ssize_t len = -ENXIO;
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 res = (struct ipr_resource_entry *)sdev->hostdata;
3109 if (res)
3110 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3112 return len;
3115 static struct device_attribute ipr_adapter_handle_attr = {
3116 .attr = {
3117 .name = "adapter_handle",
3118 .mode = S_IRUSR,
3120 .show = ipr_show_adapter_handle
3123 static struct device_attribute *ipr_dev_attrs[] = {
3124 &ipr_adapter_handle_attr,
3125 NULL,
3129 * ipr_biosparam - Return the HSC mapping
3130 * @sdev: scsi device struct
3131 * @block_device: block device pointer
3132 * @capacity: capacity of the device
3133 * @parm: Array containing returned HSC values.
3135 * This function generates the HSC parms that fdisk uses.
3136 * We want to make sure we return something that places partitions
3137 * on 4k boundaries for best performance with the IOA.
3139 * Return value:
3140 * 0 on success
3142 static int ipr_biosparam(struct scsi_device *sdev,
3143 struct block_device *block_device,
3144 sector_t capacity, int *parm)
3146 int heads, sectors;
3147 sector_t cylinders;
3149 heads = 128;
3150 sectors = 32;
3152 cylinders = capacity;
3153 sector_div(cylinders, (128 * 32));
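/* Illustration: 128 heads * 32 sectors = 4096 sectors (2MB) per
 * cylinder, so cylinder boundaries always fall on 4k multiples;
 * e.g. a disk of ~137 million sectors reports ~33500 cylinders */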
3155 /* return result */
3156 parm[0] = heads;
3157 parm[1] = sectors;
3158 parm[2] = cylinders;
3160 return 0;
3164 * ipr_slave_destroy - Unconfigure a SCSI device
3165 * @sdev: scsi device struct
3167 * Return value:
3168 * nothing
3170 static void ipr_slave_destroy(struct scsi_device *sdev)
3172 struct ipr_resource_entry *res;
3173 struct ipr_ioa_cfg *ioa_cfg;
3174 unsigned long lock_flags = 0;
3176 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 res = (struct ipr_resource_entry *) sdev->hostdata;
3180 if (res) {
3181 sdev->hostdata = NULL;
3182 res->sdev = NULL;
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3188 * ipr_slave_configure - Configure a SCSI device
3189 * @sdev: scsi device struct
3191 * This function configures the specified scsi device.
3193 * Return value:
3194 * 0 on success
3196 static int ipr_slave_configure(struct scsi_device *sdev)
3198 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3199 struct ipr_resource_entry *res;
3200 unsigned long lock_flags = 0;
3202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3203 res = sdev->hostdata;
3204 if (res) {
3205 if (ipr_is_af_dasd_device(res))
3206 sdev->type = TYPE_RAID;
3207 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3208 sdev->scsi_level = 4;
3209 sdev->no_uld_attach = 1;
3211 if (ipr_is_vset_device(res)) {
3212 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3213 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3215 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3216 sdev->allow_restart = 1;
3217 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 return 0;
3224 * ipr_slave_alloc - Prepare for commands to a device.
3225 * @sdev: scsi device struct
3227 * This function saves a pointer to the resource entry
3228 * in the scsi device struct if the device exists. We
3229 * can then use this pointer in ipr_queuecommand when
3230 * handling new commands.
3232 * Return value:
3233 * 0 on success / -ENXIO if device does not exist
3235 static int ipr_slave_alloc(struct scsi_device *sdev)
3237 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3238 struct ipr_resource_entry *res;
3239 unsigned long lock_flags;
3240 int rc = -ENXIO;
3242 sdev->hostdata = NULL;
3244 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3247 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3248 (res->cfgte.res_addr.target == sdev->id) &&
3249 (res->cfgte.res_addr.lun == sdev->lun)) {
3250 res->sdev = sdev;
3251 res->add_to_ml = 0;
3252 res->in_erp = 0;
3253 sdev->hostdata = res;
3254 if (!ipr_is_naca_model(res))
3255 res->needs_sync_complete = 1;
3256 rc = 0;
3257 break;
3261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263 return rc;
3267 * ipr_eh_host_reset - Reset the host adapter
3268 * @scsi_cmd: scsi command struct
3270 * Return value:
3271 * SUCCESS / FAILED
3273 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3275 struct ipr_ioa_cfg *ioa_cfg;
3276 int rc;
3278 ENTER;
3279 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3281 dev_err(&ioa_cfg->pdev->dev,
3282 "Adapter being reset as a result of error recovery.\n");
3284 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3285 ioa_cfg->sdt_state = GET_DUMP;
3287 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3289 LEAVE;
3290 return rc;
3293 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3295 int rc;
3297 spin_lock_irq(cmd->device->host->host_lock);
3298 rc = __ipr_eh_host_reset(cmd);
3299 spin_unlock_irq(cmd->device->host->host_lock);
3301 return rc;
3305 * ipr_eh_dev_reset - Reset the device
3306 * @scsi_cmd: scsi command struct
3308 * This function issues a device reset to the affected device.
3309 * A LUN reset will be sent to the device first. If that does
3310 * not work, a target reset will be sent.
3312 * Return value:
3313 * SUCCESS / FAILED
3315 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3317 struct ipr_cmnd *ipr_cmd;
3318 struct ipr_ioa_cfg *ioa_cfg;
3319 struct ipr_resource_entry *res;
3320 struct ipr_cmd_pkt *cmd_pkt;
3321 u32 ioasc;
3323 ENTER;
3324 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3325 res = scsi_cmd->device->hostdata;
3327 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3328 return FAILED;
3331 * If we are currently going through reset/reload, return failed. This will force the
3332 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3333 * reset to complete
3335 if (ioa_cfg->in_reset_reload)
3336 return FAILED;
3337 if (ioa_cfg->ioa_is_dead)
3338 return FAILED;
3340 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3341 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3342 if (ipr_cmd->scsi_cmd)
3343 ipr_cmd->done = ipr_scsi_eh_done;
3347 res->resetting_device = 1;
3349 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3351 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3352 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3353 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3354 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3356 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3357 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3359 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3361 res->resetting_device = 0;
3363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3365 LEAVE;
3366 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3369 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3371 int rc;
3373 spin_lock_irq(cmd->device->host->host_lock);
3374 rc = __ipr_eh_dev_reset(cmd);
3375 spin_unlock_irq(cmd->device->host->host_lock);
3377 return rc;
3381 * ipr_bus_reset_done - Op done function for bus reset.
3382 * @ipr_cmd: ipr command struct
3384 * This function is the op done function for a bus reset
3386 * Return value:
3387 * none
3389 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3392 struct ipr_resource_entry *res;
3394 ENTER;
3395 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3396 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3397 sizeof(res->cfgte.res_handle))) {
3398 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3399 break;
3404 * If the abort has not completed yet, indicate the reset has; otherwise,
3405 * call the abort's done function to wake the sleeping eh thread
3407 if (ipr_cmd->sibling->sibling)
3408 ipr_cmd->sibling->sibling = NULL;
3409 else
3410 ipr_cmd->sibling->done(ipr_cmd->sibling);
3412 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3413 LEAVE;
3417 * ipr_abort_timeout - An abort task has timed out
3418 * @ipr_cmd: ipr command struct
3420 * This function handles when an abort task times out. If this
3421 * happens we issue a bus reset since we have resources tied
3422 * up that must be freed before returning to the midlayer.
3424 * Return value:
3425 * none
3427 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3429 struct ipr_cmnd *reset_cmd;
3430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3431 struct ipr_cmd_pkt *cmd_pkt;
3432 unsigned long lock_flags = 0;
3434 ENTER;
3435 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438 return;
3441 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3442 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3443 ipr_cmd->sibling = reset_cmd;
3444 reset_cmd->sibling = ipr_cmd;
3445 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3446 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3447 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3448 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3449 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3451 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3453 LEAVE;
3457 * ipr_cancel_op - Cancel specified op
3458 * @scsi_cmd: scsi command struct
3460 * This function cancels specified op.
3462 * Return value:
3463 * SUCCESS / FAILED
3465 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3467 struct ipr_cmnd *ipr_cmd;
3468 struct ipr_ioa_cfg *ioa_cfg;
3469 struct ipr_resource_entry *res;
3470 struct ipr_cmd_pkt *cmd_pkt;
3471 u32 ioasc;
3472 int op_found = 0;
3474 ENTER;
3475 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3476 res = scsi_cmd->device->hostdata;
3478 /* If we are currently going through reset/reload, return failed.
3479 * This will force the mid-layer to call ipr_eh_host_reset,
3480 * which will then go to sleep and wait for the reset to complete
3482 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3483 return FAILED;
3484 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3485 return FAILED;
3487 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3488 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3489 ipr_cmd->done = ipr_scsi_eh_done;
3490 op_found = 1;
3491 break;
3495 if (!op_found)
3496 return SUCCESS;
3498 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3499 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3500 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3501 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3502 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3503 ipr_cmd->u.sdev = scsi_cmd->device;
3505 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3506 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3507 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3510 * If the abort task timed out and we sent a bus reset, we will get
3511 * one of the following responses to the abort
3513 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3514 ioasc = 0;
3515 ipr_trace;
3518 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3519 if (!ipr_is_naca_model(res))
3520 res->needs_sync_complete = 1;
3522 LEAVE;
3523 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3527 * ipr_eh_abort - Abort a single op
3528 * @scsi_cmd: scsi command struct
3530 * Return value:
3531 * SUCCESS / FAILED
3533 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3535 unsigned long flags;
3536 int rc;
3538 ENTER;
3540 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3541 rc = ipr_cancel_op(scsi_cmd);
3542 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3544 LEAVE;
3545 return rc;
3549 * ipr_handle_other_interrupt - Handle "other" interrupts
3550 * @ioa_cfg: ioa config struct
3551 * @int_reg: interrupt register
3553 * Return value:
3554 * IRQ_NONE / IRQ_HANDLED
3556 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3557 volatile u32 int_reg)
3559 irqreturn_t rc = IRQ_HANDLED;
3561 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3562 /* Mask the interrupt */
3563 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3565 /* Clear the interrupt */
3566 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3567 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3569 list_del(&ioa_cfg->reset_cmd->queue);
3570 del_timer(&ioa_cfg->reset_cmd->timer);
3571 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3572 } else {
3573 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3574 ioa_cfg->ioa_unit_checked = 1;
3575 else
3576 dev_err(&ioa_cfg->pdev->dev,
3577 "Permanent IOA failure. 0x%08X\n", int_reg);
3579 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3580 ioa_cfg->sdt_state = GET_DUMP;
3582 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3586 return rc;
3590 * ipr_isr - Interrupt service routine
3591 * @irq: irq number
3592 * @devp: pointer to ioa config struct
3593 * @regs: pt_regs struct
3595 * Return value:
3596 * IRQ_NONE / IRQ_HANDLED
3598 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3601 unsigned long lock_flags = 0;
3602 volatile u32 int_reg, int_mask_reg;
3603 u32 ioasc;
3604 u16 cmd_index;
3605 struct ipr_cmnd *ipr_cmd;
3606 irqreturn_t rc = IRQ_NONE;
3608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3610 /* If interrupts are disabled, ignore the interrupt */
3611 if (!ioa_cfg->allow_interrupts) {
3612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613 return IRQ_NONE;
3616 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3617 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3619 /* If an interrupt on the adapter did not occur, ignore it */
3620 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622 return IRQ_NONE;
3625 while (1) {
3626 ipr_cmd = NULL;
3628 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3629 ioa_cfg->toggle_bit) {
3631 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3632 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3634 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3635 ioa_cfg->errors_logged++;
3636 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3638 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3639 ioa_cfg->sdt_state = GET_DUMP;
3641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643 return IRQ_HANDLED;
3646 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3648 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3650 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3652 list_del(&ipr_cmd->queue);
3653 del_timer(&ipr_cmd->timer);
3654 ipr_cmd->done(ipr_cmd);
3656 rc = IRQ_HANDLED;
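/* Advance to the next HRRQ entry; on wrap, restart at the beginning
 * and flip the expected toggle bit so entries from the previous
 * pass are not mistaken for new responses */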
3658 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3659 ioa_cfg->hrrq_curr++;
3660 } else {
3661 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3662 ioa_cfg->toggle_bit ^= 1u;
3666 if (ipr_cmd != NULL) {
3667 /* Clear the PCI interrupt */
3668 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3669 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3670 } else
3671 break;
3674 if (unlikely(rc == IRQ_NONE))
3675 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3678 return rc;
3682 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3683 * @ioa_cfg: ioa config struct
3684 * @ipr_cmd: ipr command struct
3686 * Return value:
3687 * 0 on success / -1 on failure
3689 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3690 struct ipr_cmnd *ipr_cmd)
3692 int i;
3693 struct scatterlist *sglist;
3694 u32 length;
3695 u32 ioadl_flags = 0;
3696 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3698 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3700 length = scsi_cmd->request_bufflen;
3702 if (length == 0)
3703 return 0;
3705 if (scsi_cmd->use_sg) {
3706 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3707 scsi_cmd->request_buffer,
3708 scsi_cmd->use_sg,
3709 scsi_cmd->sc_data_direction);
3711 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3712 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3713 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3714 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3715 ioarcb->write_ioadl_len =
3716 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3717 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3718 ioadl_flags = IPR_IOADL_FLAGS_READ;
3719 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3720 ioarcb->read_ioadl_len =
3721 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3724 sglist = scsi_cmd->request_buffer;
3726 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3727 ioadl[i].flags_and_data_len =
3728 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3729 ioadl[i].address =
3730 cpu_to_be32(sg_dma_address(&sglist[i]));
3733 if (likely(ipr_cmd->dma_use_sg)) {
3734 ioadl[i-1].flags_and_data_len |=
3735 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3736 return 0;
3737 } else
3738 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3739 } else {
3740 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3741 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3742 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3743 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3744 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3745 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3746 ioadl_flags = IPR_IOADL_FLAGS_READ;
3747 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3748 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3751 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3752 scsi_cmd->request_buffer, length,
3753 scsi_cmd->sc_data_direction);
3755 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3756 ipr_cmd->dma_use_sg = 1;
3757 ioadl[0].flags_and_data_len =
3758 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3759 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3760 return 0;
3761 } else
3762 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3765 return -1;
3769 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3770 * @scsi_cmd: scsi command struct
3772 * Return value:
3773 * task attributes
3775 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3777 u8 tag[2];
3778 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
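/* Default to an untagged task; upgrade if the midlayer supplies a tag message */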
3780 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3781 switch (tag[0]) {
3782 case MSG_SIMPLE_TAG:
3783 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3784 break;
3785 case MSG_HEAD_TAG:
3786 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3787 break;
3788 case MSG_ORDERED_TAG:
3789 rc = IPR_FLAGS_LO_ORDERED_TASK;
3790 break;
3794 return rc;
3798 * ipr_erp_done - Process completion of ERP for a device
3799 * @ipr_cmd: ipr command struct
3801 * This function copies the sense buffer into the scsi_cmd
3802 * struct and pushes the scsi_done function.
3804 * Return value:
3805 * nothing
3807 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3809 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3810 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3811 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3812 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3814 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3815 scsi_cmd->result |= (DID_ERROR << 16);
3816 ipr_sdev_err(scsi_cmd->device,
3817 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3818 } else {
3819 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3820 SCSI_SENSE_BUFFERSIZE);
3823 if (res) {
3824 if (!ipr_is_naca_model(res))
3825 res->needs_sync_complete = 1;
3826 res->in_erp = 0;
3828 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3829 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3830 scsi_cmd->scsi_done(scsi_cmd);
3834 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3835 * @ipr_cmd: ipr command struct
3837 * Return value:
3838 * none
3840 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3842 struct ipr_ioarcb *ioarcb;
3843 struct ipr_ioasa *ioasa;
3845 ioarcb = &ipr_cmd->ioarcb;
3846 ioasa = &ipr_cmd->ioasa;
3848 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3849 ioarcb->write_data_transfer_length = 0;
3850 ioarcb->read_data_transfer_length = 0;
3851 ioarcb->write_ioadl_len = 0;
3852 ioarcb->read_ioadl_len = 0;
3853 ioasa->ioasc = 0;
3854 ioasa->residual_data_len = 0;
3858 * ipr_erp_request_sense - Send request sense to a device
3859 * @ipr_cmd: ipr command struct
3861 * This function sends a request sense to a device as a result
3862 * of a check condition.
3864 * Return value:
3865 * nothing
3867 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3869 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3870 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3872 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3873 ipr_erp_done(ipr_cmd);
3874 return;
3877 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3879 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3880 cmd_pkt->cdb[0] = REQUEST_SENSE;
3881 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3882 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3883 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3884 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
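/* Point a single read IOADL descriptor at the command block's
 * preallocated DMA-able sense buffer */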
3886 ipr_cmd->ioadl[0].flags_and_data_len =
3887 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3888 ipr_cmd->ioadl[0].address =
3889 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3891 ipr_cmd->ioarcb.read_ioadl_len =
3892 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3893 ipr_cmd->ioarcb.read_data_transfer_length =
3894 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3896 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3897 IPR_REQUEST_SENSE_TIMEOUT * 2);
3901 * ipr_erp_cancel_all - Send cancel all to a device
3902 * @ipr_cmd: ipr command struct
3904 * This function sends a cancel all to a device to clear the
3905 * queue. If we are running TCQ on the device, QERR is set to 1,
3906 * which means all outstanding ops have been dropped on the floor.
3907 * Cancel all will return them to us.
3909 * Return value:
3910 * nothing
3912 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3914 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3915 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3916 struct ipr_cmd_pkt *cmd_pkt;
3918 res->in_erp = 1;
3920 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3922 if (!scsi_get_tag_type(scsi_cmd->device)) {
3923 ipr_erp_request_sense(ipr_cmd);
3924 return;
3927 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3928 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3929 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3931 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3932 IPR_CANCEL_ALL_TIMEOUT);
3936 * ipr_dump_ioasa - Dump contents of IOASA
3937 * @ioa_cfg: ioa config struct
3938 * @ipr_cmd: ipr command struct
3940 * This function is invoked by the interrupt handler when ops
3941 * fail. It will log the IOASA if appropriate. Only called
3942 * for GPDD ops.
3944 * Return value:
3945 * none
3947 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3948 struct ipr_cmnd *ipr_cmd)
3950 int i;
3951 u16 data_len;
3952 u32 ioasc;
3953 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3954 __be32 *ioasa_data = (__be32 *)ioasa;
3955 int error_index;
3957 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3959 if (0 == ioasc)
3960 return;
3962 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3963 return;
3965 error_index = ipr_get_error(ioasc);
3967 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3968 /* Don't log an error if the IOA already logged one */
3969 if (ioasa->ilid != 0)
3970 return;
3972 if (ipr_error_table[error_index].log_ioasa == 0)
3973 return;
3976 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3977 ipr_error_table[error_index].error);
3979 if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3980 (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3981 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3982 "Device End state: %s Phase: %s\n",
3983 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3984 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3987 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3988 data_len = sizeof(struct ipr_ioasa);
3989 else
3990 data_len = be16_to_cpu(ioasa->ret_stat_len);
3992 ipr_err("IOASA Dump:\n");
3994 for (i = 0; i < data_len / 4; i += 4) {
3995 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3996 be32_to_cpu(ioasa_data[i]),
3997 be32_to_cpu(ioasa_data[i+1]),
3998 be32_to_cpu(ioasa_data[i+2]),
3999 be32_to_cpu(ioasa_data[i+3]));
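/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the dump loop above. 'i' indexes 32-bit words and
 * steps by four words per row, so 'i * 4' is the byte offset printed
 * at the left of each row; the bound 'i + 3 < words' keeps the last
 * row in range when the word count is not a multiple of four.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void demo_dump_words(const uint32_t *buf, size_t len_bytes)
{
	size_t i, words = len_bytes / 4;

	for (i = 0; i + 3 < words; i += 4)	/* four words per row */
		printf("%08zX: %08X %08X %08X %08X\n", i * 4,
		       ntohl(buf[i]), ntohl(buf[i + 1]),
		       ntohl(buf[i + 2]), ntohl(buf[i + 3]));
}
#endif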
4004 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4005 * @ipr_cmd: ipr command struct
4008 * Return value:
4009 * none
4011 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4013 u32 failing_lba;
4014 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4015 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4016 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4017 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4019 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4021 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4022 return;
4024 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4026 if (ipr_is_vset_device(res) &&
4027 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4028 ioasa->u.vset.failing_lba_hi != 0) {
4029 sense_buf[0] = 0x72;
4030 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4031 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4032 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4034 sense_buf[7] = 12;
4035 sense_buf[8] = 0;
4036 sense_buf[9] = 0x0A;
4037 sense_buf[10] = 0x80;
4039 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4041 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4042 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4043 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4044 sense_buf[15] = failing_lba & 0x000000ff;
4046 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4048 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4049 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4050 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4051 sense_buf[19] = failing_lba & 0x000000ff;
4052 } else {
4053 sense_buf[0] = 0x70;
4054 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4055 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4056 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4058 /* Illegal request */
4059 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4060 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4061 sense_buf[7] = 10; /* additional length */
4063 /* IOARCB was in error */
4064 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4065 sense_buf[15] = 0xC0;
4066 else /* Parameter data was invalid */
4067 sense_buf[15] = 0x80;
4069 sense_buf[16] =
4070 ((IPR_FIELD_POINTER_MASK &
4071 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4072 sense_buf[17] =
4073 (IPR_FIELD_POINTER_MASK &
4074 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4075 } else {
4076 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4077 if (ipr_is_vset_device(res))
4078 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4079 else
4080 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4082 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4083 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4084 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4085 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4086 sense_buf[6] = failing_lba & 0x000000ff;
4089 sense_buf[7] = 6; /* additional length */
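/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the byte packing used above: splitting an LBA into
 * big-endian bytes of the sense buffer. Offsets follow the
 * fixed-format (0x70) layout built in the else-branch; the
 * descriptor-format (0x72) case packs the same way at offsets 12..19.
 */
#if 0
#include <stdint.h>

static void demo_pack_fixed_sense_lba(uint8_t *sense_buf, uint32_t lba)
{
	sense_buf[0] |= 0x80;			/* valid bit: info field set */
	sense_buf[3] = (lba >> 24) & 0xff;	/* MSB first (big endian) */
	sense_buf[4] = (lba >> 16) & 0xff;
	sense_buf[5] = (lba >> 8) & 0xff;
	sense_buf[6] = lba & 0xff;
}
#endif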
4095 * ipr_get_autosense - Copy autosense data to sense buffer
4096 * @ipr_cmd: ipr command struct
4098 * This function copies the autosense buffer to the buffer
4099 * in the scsi_cmd, if there is autosense available.
4101 * Return value:
4102 * 1 if autosense was available / 0 if not
4104 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4106 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4108 if ((be32_to_cpu(ioasa->ioasc_specific) &
4109 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4110 return 0;
4112 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4113 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4114 SCSI_SENSE_BUFFERSIZE));
4115 return 1;
4119 * ipr_erp_start - Process an error response for a SCSI op
4120 * @ioa_cfg: ioa config struct
4121 * @ipr_cmd: ipr command struct
4123 * This function determines whether or not to initiate ERP
4124 * on the affected device.
4126 * Return value:
4127 * nothing
4129 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4130 struct ipr_cmnd *ipr_cmd)
4132 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4133 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4134 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4136 if (!res) {
4137 ipr_scsi_eh_done(ipr_cmd);
4138 return;
4141 if (ipr_is_gscsi(res))
4142 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4143 else
4144 ipr_gen_sense(ipr_cmd);
4146 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4147 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4148 if (ipr_is_naca_model(res))
4149 scsi_cmd->result |= (DID_ABORT << 16);
4150 else
4151 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4152 break;
4153 case IPR_IOASC_IR_RESOURCE_HANDLE:
4154 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4155 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4156 break;
4157 case IPR_IOASC_HW_SEL_TIMEOUT:
4158 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4159 if (!ipr_is_naca_model(res))
4160 res->needs_sync_complete = 1;
4161 break;
4162 case IPR_IOASC_SYNC_REQUIRED:
4163 if (!res->in_erp)
4164 res->needs_sync_complete = 1;
4165 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4166 break;
4167 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4168 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4169 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4170 break;
4171 case IPR_IOASC_BUS_WAS_RESET:
4172 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4174 * Report the bus reset and ask for a retry. The device
4175 * will return a unit attention check condition on the next command.
4177 if (!res->resetting_device)
4178 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4179 scsi_cmd->result |= (DID_ERROR << 16);
4180 if (!ipr_is_naca_model(res))
4181 res->needs_sync_complete = 1;
4182 break;
4183 case IPR_IOASC_HW_DEV_BUS_STATUS:
4184 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4185 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4186 if (!ipr_get_autosense(ipr_cmd)) {
4187 if (!ipr_is_naca_model(res)) {
4188 ipr_erp_cancel_all(ipr_cmd);
4189 return;
4193 if (!ipr_is_naca_model(res))
4194 res->needs_sync_complete = 1;
4195 break;
4196 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4197 break;
4198 default:
4199 scsi_cmd->result |= (DID_ERROR << 16);
4200 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4201 res->needs_sync_complete = 1;
4202 break;
4205 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4206 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4207 scsi_cmd->scsi_done(scsi_cmd);
4211 * ipr_scsi_done - mid-layer done function
4212 * @ipr_cmd: ipr command struct
4214 * This function is invoked by the interrupt handler for
4215 * ops generated by the SCSI mid-layer
4217 * Return value:
4218 * none
4220 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4223 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4224 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4226 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4228 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4229 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4230 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4231 scsi_cmd->scsi_done(scsi_cmd);
4232 } else
4233 ipr_erp_start(ioa_cfg, ipr_cmd);
4237 * ipr_save_ioafp_mode_select - Save adapter's mode select data
4238 * @ioa_cfg: ioa config struct
4239 * @scsi_cmd: scsi command struct
4241 * This function saves mode select data for the adapter to
4242 * use following an adapter reset.
4244 * Return value:
4245 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4247 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4248 struct scsi_cmnd *scsi_cmd)
4250 if (!ioa_cfg->saved_mode_pages) {
4251 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4252 GFP_ATOMIC);
4253 if (!ioa_cfg->saved_mode_pages) {
4254 dev_err(&ioa_cfg->pdev->dev,
4255 "IOA mode select buffer allocation failed\n");
4256 return SCSI_MLQUEUE_HOST_BUSY;
4260 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4261 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4262 return 0;
4266 * ipr_queuecommand - Queue a mid-layer request
4267 * @scsi_cmd: scsi command struct
4268 * @done: done function
4270 * This function queues a request generated by the mid-layer.
4272 * Return value:
4273 * 0 on success
4274 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4275 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4277 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4278 void (*done) (struct scsi_cmnd *))
4280 struct ipr_ioa_cfg *ioa_cfg;
4281 struct ipr_resource_entry *res;
4282 struct ipr_ioarcb *ioarcb;
4283 struct ipr_cmnd *ipr_cmd;
4284 int rc = 0;
4286 scsi_cmd->scsi_done = done;
4287 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4288 res = scsi_cmd->device->hostdata;
4289 scsi_cmd->result = (DID_OK << 16);
4292 * We are currently blocking all devices due to a host reset
4293 * We have told the host to stop giving us new requests, but
4294 * ERP ops don't count. FIXME
4296 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4297 return SCSI_MLQUEUE_HOST_BUSY;
4300 * FIXME - Create a scsi_set_host_offline interface
4301 * so the ioa_is_dead check can be removed
4303 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4304 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4305 scsi_cmd->result = (DID_NO_CONNECT << 16);
4306 scsi_cmd->scsi_done(scsi_cmd);
4307 return 0;
4310 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4311 ioarcb = &ipr_cmd->ioarcb;
4312 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4314 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4315 ipr_cmd->scsi_cmd = scsi_cmd;
4316 ioarcb->res_handle = res->cfgte.res_handle;
4317 ipr_cmd->done = ipr_scsi_done;
4318 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4320 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4321 if (scsi_cmd->underflow == 0)
4322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4324 if (res->needs_sync_complete) {
4325 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4326 res->needs_sync_complete = 0;
4329 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4330 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4331 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4332 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4335 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4336 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4337 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4339 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4340 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4342 if (likely(rc == 0))
4343 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4345 if (likely(rc == 0)) {
4346 mb();
4347 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4348 ioa_cfg->regs.ioarrin_reg);
4349 } else {
4350 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4351 return SCSI_MLQUEUE_HOST_BUSY;
4354 return 0;
4358 * ipr_ioa_info - Get information about the card/driver
4359 * @host: scsi host struct
4361 * Return value:
4362 * pointer to buffer with description string
4364 static const char * ipr_ioa_info(struct Scsi_Host *host)
4366 static char buffer[512];
4367 struct ipr_ioa_cfg *ioa_cfg;
4368 unsigned long lock_flags = 0;
4370 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4372 spin_lock_irqsave(host->host_lock, lock_flags);
4373 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4374 spin_unlock_irqrestore(host->host_lock, lock_flags);
4376 return buffer;
4379 static struct scsi_host_template driver_template = {
4380 .module = THIS_MODULE,
4381 .name = "IPR",
4382 .info = ipr_ioa_info,
4383 .queuecommand = ipr_queuecommand,
4384 .eh_abort_handler = ipr_eh_abort,
4385 .eh_device_reset_handler = ipr_eh_dev_reset,
4386 .eh_host_reset_handler = ipr_eh_host_reset,
4387 .slave_alloc = ipr_slave_alloc,
4388 .slave_configure = ipr_slave_configure,
4389 .slave_destroy = ipr_slave_destroy,
4390 .change_queue_depth = ipr_change_queue_depth,
4391 .change_queue_type = ipr_change_queue_type,
4392 .bios_param = ipr_biosparam,
4393 .can_queue = IPR_MAX_COMMANDS,
4394 .this_id = -1,
4395 .sg_tablesize = IPR_MAX_SGLIST,
4396 .max_sectors = IPR_IOA_MAX_SECTORS,
4397 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4398 .use_clustering = ENABLE_CLUSTERING,
4399 .shost_attrs = ipr_ioa_attrs,
4400 .sdev_attrs = ipr_dev_attrs,
4401 .proc_name = IPR_NAME
4404 #ifdef CONFIG_PPC_PSERIES
4405 static const u16 ipr_blocked_processors[] = {
4406 PV_NORTHSTAR,
4407 PV_PULSAR,
4408 PV_POWER4,
4409 PV_ICESTAR,
4410 PV_SSTAR,
4411 PV_POWER4p,
4412 PV_630,
4413 PV_630p
4417 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4418 * @ioa_cfg: ioa cfg struct
4420 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4421 * certain pSeries hardware. This function determines if the given
4422 * adapter is in one of these configurations or not.
4424 * Return value:
4425 * 1 if adapter is not supported / 0 if adapter is supported
4427 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4429 u8 rev_id;
4430 int i;
4432 if (ioa_cfg->type == 0x5702) {
4433 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4434 &rev_id) == PCIBIOS_SUCCESSFUL) {
4435 if (rev_id < 4) {
4436 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4437 if (__is_processor(ipr_blocked_processors[i]))
4438 return 1;
4443 return 0;
4445 #else
4446 #define ipr_invalid_adapter(ioa_cfg) 0
4447 #endif
4450 * ipr_ioa_bringdown_done - IOA bring down completion.
4451 * @ipr_cmd: ipr command struct
4453 * This function processes the completion of an adapter bring down.
4454 * It wakes any reset sleepers.
4456 * Return value:
4457 * IPR_RC_JOB_RETURN
4459 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4463 ENTER;
4464 ioa_cfg->in_reset_reload = 0;
4465 ioa_cfg->reset_retries = 0;
4466 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4467 wake_up_all(&ioa_cfg->reset_wait_q);
4469 spin_unlock_irq(ioa_cfg->host->host_lock);
4470 scsi_unblock_requests(ioa_cfg->host);
4471 spin_lock_irq(ioa_cfg->host->host_lock);
4472 LEAVE;
4474 return IPR_RC_JOB_RETURN;
4478 * ipr_ioa_reset_done - IOA reset completion.
4479 * @ipr_cmd: ipr command struct
4481 * This function processes the completion of an adapter reset.
4482 * It schedules any necessary mid-layer add/removes and
4483 * wakes any reset sleepers.
4485 * Return value:
4486 * IPR_RC_JOB_RETURN
4488 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4490 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4491 struct ipr_resource_entry *res;
4492 struct ipr_hostrcb *hostrcb, *temp;
4493 int i = 0;
4495 ENTER;
4496 ioa_cfg->in_reset_reload = 0;
4497 ioa_cfg->allow_cmds = 1;
4498 ioa_cfg->reset_cmd = NULL;
4499 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4501 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4502 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4503 ipr_trace;
4504 break;
4507 schedule_work(&ioa_cfg->work_q);
4509 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4510 list_del(&hostrcb->queue);
4511 if (i++ < IPR_NUM_LOG_HCAMS)
4512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4513 else
4514 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4517 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4519 ioa_cfg->reset_retries = 0;
4520 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4521 wake_up_all(&ioa_cfg->reset_wait_q);
4523 spin_unlock_irq(ioa_cfg->host->host_lock);
4524 scsi_unblock_requests(ioa_cfg->host);
4525 spin_lock_irq(ioa_cfg->host->host_lock);
4527 if (!ioa_cfg->allow_cmds)
4528 scsi_block_requests(ioa_cfg->host);
4530 LEAVE;
4531 return IPR_RC_JOB_RETURN;
4535 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4536 * @supported_dev: supported device struct
4537 * @vpids: vendor product id struct
4539 * Return value:
4540 * none
4542 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4543 struct ipr_std_inq_vpids *vpids)
4545 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4546 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4547 supported_dev->num_records = 1;
4548 supported_dev->data_length =
4549 cpu_to_be16(sizeof(struct ipr_supported_device));
4550 supported_dev->reserved = 0;
4554 * ipr_set_supported_devs - Send Set Supported Devices for a device
4555 * @ipr_cmd: ipr command struct
4557 * This function sends a Set Supported Devices command to the adapter
4559 * Return value:
4560 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4562 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4565 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4566 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4567 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4568 struct ipr_resource_entry *res = ipr_cmd->u.res;
4570 ipr_cmd->job_step = ipr_ioa_reset_done;
4572 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4573 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4574 continue;
4576 ipr_cmd->u.res = res;
4577 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4579 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4580 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4581 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4583 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4584 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4585 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4587 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4588 sizeof(struct ipr_supported_device));
4589 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4590 offsetof(struct ipr_misc_cbs, supp_dev));
4591 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4592 ioarcb->write_data_transfer_length =
4593 cpu_to_be32(sizeof(struct ipr_supported_device));
4595 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4596 IPR_SET_SUP_DEVICE_TIMEOUT);
4598 ipr_cmd->job_step = ipr_set_supported_devs;
4599 return IPR_RC_JOB_RETURN;
4602 return IPR_RC_JOB_CONTINUE;
4606 * ipr_setup_write_cache - Disable write cache if needed
4607 * @ipr_cmd: ipr command struct
4609 * This function sets up the adapter's write cache to the desired setting
4611 * Return value:
4612 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4614 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4618 ipr_cmd->job_step = ipr_set_supported_devs;
4619 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4620 struct ipr_resource_entry, queue);
4622 if (ioa_cfg->cache_state != CACHE_DISABLED)
4623 return IPR_RC_JOB_CONTINUE;
4625 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4626 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4627 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4628 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4630 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4632 return IPR_RC_JOB_RETURN;
4636 * ipr_get_mode_page - Locate specified mode page
4637 * @mode_pages: mode page buffer
4638 * @page_code: page code to find
4639 * @len: minimum required length for mode page
4641 * Return value:
4642 * pointer to mode page / NULL on failure
4644 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4645 u32 page_code, u32 len)
4647 struct ipr_mode_page_hdr *mode_hdr;
4648 u32 page_length;
4649 u32 length;
4651 if (!mode_pages || (mode_pages->hdr.length == 0))
4652 return NULL;
4654 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4655 mode_hdr = (struct ipr_mode_page_hdr *)
4656 (mode_pages->data + mode_pages->hdr.block_desc_len);
4658 while (length) {
4659 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4660 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4661 return mode_hdr;
4662 break;
4663 } else {
4664 page_length = (sizeof(struct ipr_mode_page_hdr) +
4665 mode_hdr->page_length);
4666 length -= page_length;
4667 mode_hdr = (struct ipr_mode_page_hdr *)
4668 ((unsigned long)mode_hdr + page_length);
4671 return NULL;
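/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the walk above. Mode pages are variable length, so
 * each step advances by the page header plus the page_length the
 * header reports; the page code lives in the low six bits of the
 * first header byte. A two-byte header is assumed for illustration.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static const uint8_t *demo_find_mode_page(const uint8_t *p, size_t len,
					   uint8_t code)
{
	while (len >= 2) {
		size_t page_len = 2 + p[1];	/* header + page body */

		if ((p[0] & 0x3f) == code)	/* mask off the PS bit */
			return p;
		if (page_len > len)
			break;			/* malformed: stop walking */
		p += page_len;
		len -= page_len;
	}
	return NULL;
}
#endif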
4675 * ipr_check_term_power - Check for term power errors
4676 * @ioa_cfg: ioa config struct
4677 * @mode_pages: IOAFP mode pages buffer
4679 * Check the IOAFP's mode page 28 for term power errors
4681 * Return value:
4682 * nothing
4684 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4685 struct ipr_mode_pages *mode_pages)
4687 int i;
4688 int entry_length;
4689 struct ipr_dev_bus_entry *bus;
4690 struct ipr_mode_page28 *mode_page;
4692 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4693 sizeof(struct ipr_mode_page28));
4695 entry_length = mode_page->entry_length;
4697 bus = mode_page->bus;
4699 for (i = 0; i < mode_page->num_entries; i++) {
4700 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4701 dev_err(&ioa_cfg->pdev->dev,
4702 "Term power is absent on scsi bus %d\n",
4703 bus->res_addr.bus);
4706 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4711 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4712 * @ioa_cfg: ioa config struct
4714 * Looks through the config table checking for SES devices. If
4715 * an SES device appears in the SES table with a maximum SCSI
4716 * bus speed, that bus's speed is limited accordingly.
4718 * Return value:
4719 * none
4721 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4723 u32 max_xfer_rate;
4724 int i;
4726 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4727 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4728 ioa_cfg->bus_attr[i].bus_width);
4730 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4731 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4736 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4737 * @ioa_cfg: ioa config struct
4738 * @mode_pages: mode page 28 buffer
4740 * Updates mode page 28 based on driver configuration
4742 * Return value:
4743 * none
4745 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4746 struct ipr_mode_pages *mode_pages)
4748 int i, entry_length;
4749 struct ipr_dev_bus_entry *bus;
4750 struct ipr_bus_attributes *bus_attr;
4751 struct ipr_mode_page28 *mode_page;
4753 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4754 sizeof(struct ipr_mode_page28));
4756 entry_length = mode_page->entry_length;
4758 /* Loop for each device bus entry */
4759 for (i = 0, bus = mode_page->bus;
4760 i < mode_page->num_entries;
4761 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4762 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4763 dev_err(&ioa_cfg->pdev->dev,
4764 "Invalid resource address reported: 0x%08X\n",
4765 IPR_GET_PHYS_LOC(bus->res_addr));
4766 continue;
4769 bus_attr = &ioa_cfg->bus_attr[i];
4770 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4771 bus->bus_width = bus_attr->bus_width;
4772 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4773 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4774 if (bus_attr->qas_enabled)
4775 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4776 else
4777 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4782 * ipr_build_mode_select - Build a mode select command
4783 * @ipr_cmd: ipr command struct
4784 * @res_handle: resource handle to send command to
4785 * @parm: Byte 2 of Mode Select command
4786 * @dma_addr: DMA buffer address
4787 * @xfer_len: data transfer length
4789 * Return value:
4790 * none
4792 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4793 __be32 res_handle, u8 parm, u32 dma_addr,
4794 u8 xfer_len)
4796 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4797 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4799 ioarcb->res_handle = res_handle;
4800 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4801 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4802 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4803 ioarcb->cmd_pkt.cdb[1] = parm;
4804 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4806 ioadl->flags_and_data_len =
4807 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4808 ioadl->address = cpu_to_be32(dma_addr);
4809 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4810 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4814 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4815 * @ipr_cmd: ipr command struct
4817 * This function sets up the SCSI bus attributes and sends
4818 * a Mode Select for Page 28 to activate them.
4820 * Return value:
4821 * IPR_RC_JOB_RETURN
4823 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4825 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4826 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4827 int length;
4829 ENTER;
4830 if (ioa_cfg->saved_mode_pages) {
4831 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4832 ioa_cfg->saved_mode_page_len);
4833 length = ioa_cfg->saved_mode_page_len;
4834 } else {
4835 ipr_scsi_bus_speed_limit(ioa_cfg);
4836 ipr_check_term_power(ioa_cfg, mode_pages);
4837 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4838 length = mode_pages->hdr.length + 1;
4839 mode_pages->hdr.length = 0;
4842 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4844 length);
4846 ipr_cmd->job_step = ipr_setup_write_cache;
4847 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4849 LEAVE;
4850 return IPR_RC_JOB_RETURN;
4854 * ipr_build_mode_sense - Builds a mode sense command
4855 * @ipr_cmd: ipr command struct
4856 * @res_handle: resource handle to send command to
4857 * @parm: Byte 2 of mode sense command
4858 * @dma_addr: DMA address of mode sense buffer
4859 * @xfer_len: Size of DMA buffer
4861 * Return value:
4862 * none
4864 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4865 __be32 res_handle,
4866 u8 parm, u32 dma_addr, u8 xfer_len)
4868 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4869 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4871 ioarcb->res_handle = res_handle;
4872 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4873 ioarcb->cmd_pkt.cdb[2] = parm;
4874 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4875 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4877 ioadl->flags_and_data_len =
4878 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4879 ioadl->address = cpu_to_be32(dma_addr);
4880 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4881 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4885 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4886 * @ipr_cmd: ipr command struct
4888 * This function sends a Page 28 mode sense to the IOA to
4889 * retrieve SCSI bus attributes.
4891 * Return value:
4892 * IPR_RC_JOB_RETURN
4894 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4898 ENTER;
4899 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4900 0x28, ioa_cfg->vpd_cbs_dma +
4901 offsetof(struct ipr_misc_cbs, mode_pages),
4902 sizeof(struct ipr_mode_pages));
4904 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4906 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4908 LEAVE;
4909 return IPR_RC_JOB_RETURN;
4913 * ipr_init_res_table - Initialize the resource table
4914 * @ipr_cmd: ipr command struct
4916 * This function looks through the existing resource table, comparing
4917 * it with the config table. This function will take care of old/new
4918 * devices and schedule adding/removing them from the mid-layer
4919 * as appropriate.
4921 * Return value:
4922 * IPR_RC_JOB_CONTINUE
4924 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4926 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4927 struct ipr_resource_entry *res, *temp;
4928 struct ipr_config_table_entry *cfgte;
4929 int found, i;
4930 LIST_HEAD(old_res);
4932 ENTER;
4933 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4934 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4936 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4937 list_move_tail(&res->queue, &old_res);
4939 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4940 cfgte = &ioa_cfg->cfg_table->dev[i];
4941 found = 0;
4943 list_for_each_entry_safe(res, temp, &old_res, queue) {
4944 if (!memcmp(&res->cfgte.res_addr,
4945 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4946 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4947 found = 1;
4948 break;
4952 if (!found) {
4953 if (list_empty(&ioa_cfg->free_res_q)) {
4954 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4955 break;
4958 found = 1;
4959 res = list_entry(ioa_cfg->free_res_q.next,
4960 struct ipr_resource_entry, queue);
4961 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4962 ipr_init_res_entry(res);
4963 res->add_to_ml = 1;
4966 if (found)
4967 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4970 list_for_each_entry_safe(res, temp, &old_res, queue) {
4971 if (res->sdev) {
4972 res->del_from_ml = 1;
4973 res->sdev->hostdata = NULL;
4974 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4975 } else {
4976 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4980 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4982 LEAVE;
4983 return IPR_RC_JOB_CONTINUE;
4987 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4988 * @ipr_cmd: ipr command struct
4990 * This function sends a Query IOA Configuration command
4991 * to the adapter to retrieve the IOA configuration table.
4993 * Return value:
4994 * IPR_RC_JOB_RETURN
4996 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5000 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5001 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5003 ENTER;
5004 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5005 ucode_vpd->major_release, ucode_vpd->card_type,
5006 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5007 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5008 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5010 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5011 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5012 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5014 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5015 ioarcb->read_data_transfer_length =
5016 cpu_to_be32(sizeof(struct ipr_config_table));
5018 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5019 ioadl->flags_and_data_len =
5020 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5022 ipr_cmd->job_step = ipr_init_res_table;
5024 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5026 LEAVE;
5027 return IPR_RC_JOB_RETURN;
5031 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5032 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
5034 * This utility function sends an inquiry to the adapter.
5036 * Return value:
5037 * none
5039 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5040 u32 dma_addr, u8 xfer_len)
5042 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5043 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5045 ENTER;
5046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5049 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5050 ioarcb->cmd_pkt.cdb[1] = flags;
5051 ioarcb->cmd_pkt.cdb[2] = page;
5052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5054 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5055 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5057 ioadl->address = cpu_to_be32(dma_addr);
5058 ioadl->flags_and_data_len =
5059 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5061 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5062 LEAVE;
5066 * ipr_inquiry_page_supported - Is the given inquiry page supported
5067 * @page0: inquiry page 0 buffer
5068 * @page: page code.
5070 * This function determines if the specified inquiry page is supported.
5072 * Return value:
5073 * 1 if page is supported / 0 if not
5075 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5077 int i;
5079 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5080 if (page0->page[i] == page)
5081 return 1;
5083 return 0;
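/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) showing the same check against a raw VPD page 0 buffer
 * as returned by INQUIRY with EVPD=1: byte 3 carries the list length
 * and the supported page codes start at byte 4 (the SPC fixed layout
 * is assumed here for illustration).
 */
#if 0
#include <stdint.h>

static int demo_vpd_page_supported(const uint8_t *page0, uint8_t page)
{
	int i, len = page0[3];			/* number of page codes */

	for (i = 0; i < len; i++)
		if (page0[4 + i] == page)
			return 1;
	return 0;
}
#endif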
5087 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5088 * @ipr_cmd: ipr command struct
5090 * This function sends a Page 3 inquiry to the adapter
5091 * to retrieve software VPD information.
5093 * Return value:
5094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5096 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5099 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5101 ENTER;
5103 if (!ipr_inquiry_page_supported(page0, 1))
5104 ioa_cfg->cache_state = CACHE_NONE;
5106 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5108 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5109 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5110 sizeof(struct ipr_inquiry_page3));
5112 LEAVE;
5113 return IPR_RC_JOB_RETURN;
5117 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5118 * @ipr_cmd: ipr command struct
5120 * This function sends a Page 0 inquiry to the adapter
5121 * to retrieve supported inquiry pages.
5123 * Return value:
5124 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5126 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5129 char type[5];
5131 ENTER;
5133 /* Grab the type out of the VPD and store it away */
5134 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5135 type[4] = '\0';
5136 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5138 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5140 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5141 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5142 sizeof(struct ipr_inquiry_page0));
5144 LEAVE;
5145 return IPR_RC_JOB_RETURN;
5149 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5150 * @ipr_cmd: ipr command struct
5152 * This function sends a standard inquiry to the adapter.
5154 * Return value:
5155 * IPR_RC_JOB_RETURN
5157 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5161 ENTER;
5162 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5164 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5165 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5166 sizeof(struct ipr_ioa_vpd));
5168 LEAVE;
5169 return IPR_RC_JOB_RETURN;
5173 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
5174 * @ipr_cmd: ipr command struct
5176 * This function sends an Identify Host Request Response Queue
5177 * command to establish the HRRQ with the adapter.
5179 * Return value:
5180 * IPR_RC_JOB_RETURN
5182 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
5184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5185 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5187 ENTER;
5188 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5190 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5191 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5193 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5194 ioarcb->cmd_pkt.cdb[2] =
5195 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5196 ioarcb->cmd_pkt.cdb[3] =
5197 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5198 ioarcb->cmd_pkt.cdb[4] =
5199 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5200 ioarcb->cmd_pkt.cdb[5] =
5201 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5202 ioarcb->cmd_pkt.cdb[7] =
5203 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5204 ioarcb->cmd_pkt.cdb[8] =
5205 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5207 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5209 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5211 LEAVE;
5212 return IPR_RC_JOB_RETURN;
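/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the CDB packing above: the 32-bit queue address
 * goes into bytes 2..5 and the 16-bit queue size into bytes 7..8,
 * both most-significant byte first.
 */
#if 0
#include <stdint.h>

static void demo_pack_id_hrrq_cdb(uint8_t *cdb, uint32_t addr, uint16_t size)
{
	cdb[2] = (addr >> 24) & 0xff;	/* address, MSB first */
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;
	cdb[7] = (size >> 8) & 0xff;	/* length, MSB first */
	cdb[8] = size & 0xff;
}
#endif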
5216 * ipr_reset_timer_done - Adapter reset timer function
5217 * @ipr_cmd: ipr command struct
5219 * Description: This function is used in adapter reset processing
5220 * for timing events. If the reset_cmd pointer in the IOA
5221 * config struct is not this adapter's, we are doing nested
5222 * resets and fail_all_ops will take care of freeing the
5223 * command block.
5225 * Return value:
5226 * none
5228 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5231 unsigned long lock_flags = 0;
5233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5235 if (ioa_cfg->reset_cmd == ipr_cmd) {
5236 list_del(&ipr_cmd->queue);
5237 ipr_cmd->done(ipr_cmd);
5240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5244 * ipr_reset_start_timer - Start a timer for adapter reset job
5245 * @ipr_cmd: ipr command struct
5246 * @timeout: timeout value
5248 * Description: This function is used in adapter reset processing
5249 * for timing events. If the reset_cmd pointer in the IOA
5250 * config struct is not this adapter's, we are doing nested
5251 * resets and fail_all_ops will take care of freeing the
5252 * command block.
5254 * Return value:
5255 * none
5257 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5258 unsigned long timeout)
5260 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5261 ipr_cmd->done = ipr_reset_ioa_job;
5263 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5264 ipr_cmd->timer.expires = jiffies + timeout;
5265 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5266 add_timer(&ipr_cmd->timer);
5270 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5271 * @ioa_cfg: ioa cfg struct
5273 * Return value:
5274 * nothing
5276 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5278 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5280 /* Initialize Host RRQ pointers */
5281 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5282 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5283 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5284 ioa_cfg->toggle_bit = 1;
5286 /* Zero out config table */
5287 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
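/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the toggle-bit convention initialized above with
 * hrrq_start/hrrq_end/hrrq_curr and toggle_bit = 1. The adapter posts
 * entries whose toggle bit alternates on every pass through the ring;
 * the host consumes while the bit matches its expected value and
 * flips that value each time it wraps, so stale entries from the
 * previous pass are never re-read. Names and the bit position are
 * assumptions for illustration.
 */
#if 0
#include <stdint.h>

struct demo_hrrq {
	volatile uint32_t *curr, *start, *end;	/* ring cursor and bounds */
	uint32_t toggle;			/* toggle value expected next */
};

/* Returns 1 and stores the entry if the adapter has posted one. */
static int demo_hrrq_pop(struct demo_hrrq *q, uint32_t *entry)
{
	if ((*q->curr & 1) != q->toggle)
		return 0;			/* still the previous pass */

	*entry = *q->curr;
	if (q->curr == q->end) {		/* wrapped: flip expectation */
		q->curr = q->start;
		q->toggle ^= 1;
	} else {
		q->curr++;
	}
	return 1;
}
#endif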
5291 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5292 * @ipr_cmd: ipr command struct
5294 * This function reinitializes some control blocks and
5295 * enables destructive diagnostics on the adapter.
5297 * Return value:
5298 * IPR_RC_JOB_RETURN
5300 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5302 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5303 volatile u32 int_reg;
5305 ENTER;
5306 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
5307 ipr_init_ioa_mem(ioa_cfg);
5309 ioa_cfg->allow_interrupts = 1;
5310 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5312 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5313 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5314 ioa_cfg->regs.clr_interrupt_mask_reg);
5315 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5316 return IPR_RC_JOB_CONTINUE;
5319 /* Enable destructive diagnostics on IOA */
5320 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5322 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5323 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5325 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5327 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5328 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5329 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5330 ipr_cmd->done = ipr_reset_ioa_job;
5331 add_timer(&ipr_cmd->timer);
5332 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5334 LEAVE;
5335 return IPR_RC_JOB_RETURN;
5339 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5340 * @ipr_cmd: ipr command struct
5342 * This function is invoked when an adapter dump has run out
5343 * of processing time.
5345 * Return value:
5346 * IPR_RC_JOB_CONTINUE
5348 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5350 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5352 if (ioa_cfg->sdt_state == GET_DUMP)
5353 ioa_cfg->sdt_state = ABORT_DUMP;
5355 ipr_cmd->job_step = ipr_reset_alert;
5357 return IPR_RC_JOB_CONTINUE;
5361 * ipr_unit_check_no_data - Log a unit check/no data error log
5362 * @ioa_cfg: ioa config struct
5364 * Logs an error indicating the adapter unit checked, but for some
5365 * reason, we were unable to fetch the unit check buffer.
5367 * Return value:
5368 * nothing
5370 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5372 ioa_cfg->errors_logged++;
5373 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5377 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5378 * @ioa_cfg: ioa config struct
5380 * Fetches the unit check buffer from the adapter by clocking the data
5381 * through the mailbox register.
5383 * Return value:
5384 * nothing
5386 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5388 unsigned long mailbox;
5389 struct ipr_hostrcb *hostrcb;
5390 struct ipr_uc_sdt sdt;
5391 int rc, length;
5393 mailbox = readl(ioa_cfg->ioa_mailbox);
5395 if (!ipr_sdt_is_fmt2(mailbox)) {
5396 ipr_unit_check_no_data(ioa_cfg);
5397 return;
5400 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5401 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5402 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5404 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5405 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5406 ipr_unit_check_no_data(ioa_cfg);
5407 return;
5410 /* Find length of the first sdt entry (UC buffer) */
5411 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5412 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5414 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5415 struct ipr_hostrcb, queue);
5416 list_del(&hostrcb->queue);
5417 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5419 rc = ipr_get_ldump_data_section(ioa_cfg,
5420 be32_to_cpu(sdt.entry[0].bar_str_offset),
5421 (__be32 *)&hostrcb->hcam,
5422 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5424 if (!rc)
5425 ipr_handle_log_data(ioa_cfg, hostrcb);
5426 else
5427 ipr_unit_check_no_data(ioa_cfg);
5429 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5433 * ipr_reset_restore_cfg_space - Restore PCI config space.
5434 * @ipr_cmd: ipr command struct
5436 * Description: This function restores the saved PCI config space of
5437 * the adapter, fails all outstanding ops back to the callers, and
5438 * fetches the dump/unit check if applicable to this reset.
5440 * Return value:
5441 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5443 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5445 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5446 int rc;
5448 ENTER;
5449 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5450 rc = pci_restore_state(ioa_cfg->pdev);
5452 if (rc != PCIBIOS_SUCCESSFUL) {
5453 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5454 return IPR_RC_JOB_CONTINUE;
5457 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5458 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5459 return IPR_RC_JOB_CONTINUE;
5462 ipr_fail_all_ops(ioa_cfg);
5464 if (ioa_cfg->ioa_unit_checked) {
5465 ioa_cfg->ioa_unit_checked = 0;
5466 ipr_get_unit_check_buffer(ioa_cfg);
5467 ipr_cmd->job_step = ipr_reset_alert;
5468 ipr_reset_start_timer(ipr_cmd, 0);
5469 return IPR_RC_JOB_RETURN;
5472 if (ioa_cfg->in_ioa_bringdown) {
5473 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5474 } else {
5475 ipr_cmd->job_step = ipr_reset_enable_ioa;
5477 if (GET_DUMP == ioa_cfg->sdt_state) {
5478 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5479 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5480 schedule_work(&ioa_cfg->work_q);
5481 return IPR_RC_JOB_RETURN;
5485 LEAVE;
5486 return IPR_RC_JOB_CONTINUE;
5490 * ipr_reset_start_bist - Run BIST on the adapter.
5491 * @ipr_cmd: ipr command struct
5493 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5495 * Return value:
5496 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5498 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5500 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5501 int rc;
5503 ENTER;
5504 pci_block_user_cfg_access(ioa_cfg->pdev);
5505 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5507 if (rc != PCIBIOS_SUCCESSFUL) {
5508 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5509 rc = IPR_RC_JOB_CONTINUE;
5510 } else {
5511 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5512 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5513 rc = IPR_RC_JOB_RETURN;
5516 LEAVE;
5517 return rc;
5521 * ipr_reset_allowed - Query whether or not IOA can be reset
5522 * @ioa_cfg: ioa config struct
5524 * Return value:
5525 * 0 if reset not allowed / non-zero if reset is allowed
5527 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5529 volatile u32 temp_reg;
5531 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5532 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5536 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5537 * @ipr_cmd: ipr command struct
5539 * Description: This function waits for adapter permission to run BIST,
5540 * then runs BIST. If the adapter does not give permission after a
5541 * reasonable time, we will reset the adapter anyway. Resetting the
5542 * adapter without warning it risks losing the persistent error
5543 * log on the adapter: if the adapter is reset while it is writing
5544 * to its flash, that flash segment will have bad ECC and be
5545 * zeroed.
5547 * Return value:
5548 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5550 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5553 int rc = IPR_RC_JOB_RETURN;
5555 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5556 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5557 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5558 } else {
5559 ipr_cmd->job_step = ipr_reset_start_bist;
5560 rc = IPR_RC_JOB_CONTINUE;
5563 return rc;
5567 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5568 * @ipr_cmd: ipr command struct
5570 * Description: This function alerts the adapter that it will be reset.
5571 * If memory space is not currently enabled, proceed directly
5572 * to running BIST on the adapter. The timer must always be started
5573 * so we guarantee we do not run BIST from ipr_isr.
5575 * Return value:
5576 * IPR_RC_JOB_RETURN
5578 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5580 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5581 u16 cmd_reg;
5582 int rc;
5584 ENTER;
5585 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5587 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5588 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5589 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5590 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5591 } else {
5592 ipr_cmd->job_step = ipr_reset_start_bist;
5595 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5596 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5598 LEAVE;
5599 return IPR_RC_JOB_RETURN;
5603 * ipr_reset_ucode_download_done - Microcode download completion
5604 * @ipr_cmd: ipr command struct
5606 * Description: This function unmaps the microcode download buffer.
5608 * Return value:
5609 * IPR_RC_JOB_CONTINUE
5611 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5614 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5616 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5617 sglist->num_sg, DMA_TO_DEVICE);
5619 ipr_cmd->job_step = ipr_reset_alert;
5620 return IPR_RC_JOB_CONTINUE;
5624 * ipr_reset_ucode_download - Download microcode to the adapter
5625 * @ipr_cmd: ipr command struct
5627 * Description: This function checks to see if there is microcode
5628 * to download to the adapter. If there is, a download is performed.
5630 * Return value:
5631 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5633 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5636 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5638 ENTER;
5639 ipr_cmd->job_step = ipr_reset_alert;
5641 if (!sglist)
5642 return IPR_RC_JOB_CONTINUE;
5644 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5645 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5646 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5647 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5648 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5649 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5650 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5652 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5653 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5655 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5656 IPR_WRITE_BUFFER_TIMEOUT);
5658 LEAVE;
5659 return IPR_RC_JOB_RETURN;
5663 * ipr_reset_shutdown_ioa - Shutdown the adapter
5664 * @ipr_cmd: ipr command struct
5666 * Description: This function issues an adapter shutdown of the
5667 * specified type to the specified adapter as part of the
5668 * adapter reset job.
5670 * Return value:
5671 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5673 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5676 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5677 unsigned long timeout;
5678 int rc = IPR_RC_JOB_CONTINUE;
5680 ENTER;
5681 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5682 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5683 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5684 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5685 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5687 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5688 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5689 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5690 timeout = IPR_INTERNAL_TIMEOUT;
5691 else
5692 timeout = IPR_SHUTDOWN_TIMEOUT;
5694 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5696 rc = IPR_RC_JOB_RETURN;
5697 ipr_cmd->job_step = ipr_reset_ucode_download;
5698 } else
5699 ipr_cmd->job_step = ipr_reset_alert;
5701 LEAVE;
5702 return rc;
5706 * ipr_reset_ioa_job - Adapter reset job
5707 * @ipr_cmd: ipr command struct
5709 * Description: This function is the job router for the adapter reset job.
5711 * Return value:
5712 * none
5714 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5716 u32 rc, ioasc;
5717 unsigned long scratch = ipr_cmd->u.scratch;
5718 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5720 do {
5721 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5723 if (ioa_cfg->reset_cmd != ipr_cmd) {
5725 * We are doing nested adapter resets and this is
5726 * not the current reset job.
5728 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5729 return;
5732 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5733 dev_err(&ioa_cfg->pdev->dev,
5734 "0x%02X failed with IOASC: 0x%08X\n",
5735 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5737 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5738 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5739 return;
5742 ipr_reinit_ipr_cmnd(ipr_cmd);
5743 ipr_cmd->u.scratch = scratch;
5744 rc = ipr_cmd->job_step(ipr_cmd);
5745 } while (rc == IPR_RC_JOB_CONTINUE);
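/*
 * Editor's note: a standalone sketch (guarded out with #if 0, not
 * driver code) of the job-step pattern routed above. Each step either
 * completes synchronously and returns CONTINUE, so the router
 * immediately runs the next step it installed, or kicks off an async
 * operation and returns RETURN, with the completion path re-entering
 * the router later. This keeps the reset sequence single-threaded
 * without ever sleeping in interrupt context.
 */
#if 0
enum demo_rc { DEMO_JOB_CONTINUE, DEMO_JOB_RETURN };

struct demo_job {
	enum demo_rc (*job_step)(struct demo_job *);
};

static void demo_run_job(struct demo_job *job)
{
	/* Loop until a step defers to an async completion. */
	while (job->job_step(job) == DEMO_JOB_CONTINUE)
		;
}
#endif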
5749 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5750 * @ioa_cfg: ioa config struct
5751 * @job_step: first job step of reset job
5752 * @shutdown_type: shutdown type
5754 * Description: This function will initiate the reset of the given adapter
5755 * starting at the selected job step.
5756 * If the caller needs to wait on the completion of the reset,
5757 * the caller must sleep on the reset_wait_q.
5759 * Return value:
5760 * none
5762 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5763 int (*job_step) (struct ipr_cmnd *),
5764 enum ipr_shutdown_type shutdown_type)
5766 struct ipr_cmnd *ipr_cmd;
5768 ioa_cfg->in_reset_reload = 1;
5769 ioa_cfg->allow_cmds = 0;
5770 scsi_block_requests(ioa_cfg->host);
5772 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5773 ioa_cfg->reset_cmd = ipr_cmd;
5774 ipr_cmd->job_step = job_step;
5775 ipr_cmd->u.shutdown_type = shutdown_type;
5777 ipr_reset_ioa_job(ipr_cmd);
5781 * ipr_initiate_ioa_reset - Initiate an adapter reset
5782 * @ioa_cfg: ioa config struct
5783 * @shutdown_type: shutdown type
5785 * Description: This function will initiate the reset of the given adapter.
5786 * If the caller needs to wait on the completion of the reset,
5787 * the caller must sleep on the reset_wait_q.
5789 * Return value:
5790 * none
5792 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5793 enum ipr_shutdown_type shutdown_type)
5795 if (ioa_cfg->ioa_is_dead)
5796 return;
5798 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5799 ioa_cfg->sdt_state = ABORT_DUMP;
5801 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5802 dev_err(&ioa_cfg->pdev->dev,
5803 "IOA taken offline - error recovery failed\n");
5805 ioa_cfg->reset_retries = 0;
5806 ioa_cfg->ioa_is_dead = 1;
5808 if (ioa_cfg->in_ioa_bringdown) {
5809 ioa_cfg->reset_cmd = NULL;
5810 ioa_cfg->in_reset_reload = 0;
5811 ipr_fail_all_ops(ioa_cfg);
5812 wake_up_all(&ioa_cfg->reset_wait_q);
5814 spin_unlock_irq(ioa_cfg->host->host_lock);
5815 scsi_unblock_requests(ioa_cfg->host);
5816 spin_lock_irq(ioa_cfg->host->host_lock);
5817 return;
5818 } else {
5819 ioa_cfg->in_ioa_bringdown = 1;
5820 shutdown_type = IPR_SHUTDOWN_NONE;
5824 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5825 shutdown_type);
5829 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5830 * @ioa_cfg: ioa cfg struct
5832 * Description: This is the second phase of adapter initialization.
5833 * This function takes care of initializing the adapter to the point
5834 * where it can accept new commands.
5836 * Return value:
5837 * 0 on success / -EIO on failure
5839 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5841 int rc = 0;
5842 unsigned long host_lock_flags = 0;
5844 ENTER;
5845 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5846 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5847 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5850 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5851 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5853 if (ioa_cfg->ioa_is_dead) {
5854 rc = -EIO;
5855 } else if (ipr_invalid_adapter(ioa_cfg)) {
5856 if (!ipr_testmode)
5857 rc = -EIO;
5859 dev_err(&ioa_cfg->pdev->dev,
5860 "Adapter not supported in this hardware configuration.\n");
5863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5865 LEAVE;
5866 return rc;

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->saved_mode_pages);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
						 sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
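
/*
 * Illustrative note: command block i is given the response handle
 * (i << 2) above, so the index of a completed command can be recovered
 * from a handle posted to the host RRQ by shifting back down, e.g.
 * (be32_to_cpu(handle) >> 2). The low two bits are assumed here to be
 * reserved for RRQ status flags rather than being part of the index.
 */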

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
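
/*
 * Note on the unwind path above: the error labels run in reverse
 * allocation order, so a failure at any step releases exactly what the
 * earlier steps allocated. In the abstract (placeholder names, not
 * driver symbols):
 *
 *	if (!(a = alloc_a()))
 *		goto out;
 *	if (!(b = alloc_b()))
 *		goto out_free_a;
 *	rc = 0;
 * out:
 *	return rc;
 * out_free_a:
 *	release_a(a);
 *	goto out;
 */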

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}
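
/*
 * Example: each entry of ipr_pci_table below stores a pointer such as
 * (kernel_ulong_t)&ipr_chip_cfg[0] in its driver_data field, so the
 * fast path above normally returns immediately; the ipr_chip[] scan is
 * only a fallback for matches that carry no driver_data.
 */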

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
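
/*
 * Illustrative note: a volume set may expose, say, only LUN 3 on a
 * target with no LUN 0, which an ordinary sequential midlayer scan
 * would miss; the nested loop above therefore probes every
 * (IPR_VSET_BUS, target, lun) combination explicitly via
 * scsi_add_device().
 */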

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
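
/*
 * MODULE_DEVICE_TABLE() exports ipr_pci_table as module alias
 * information (listed by "modinfo ipr", for example), allowing hotplug
 * tooling to load this driver automatically when a matching PCI device
 * appears.
 */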

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);