[SCSI] iscsi class: fix vlan configuration
drivers/scsi/qla4xxx/ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsicam.h>
15 #include "ql4_def.h"
16 #include "ql4_version.h"
17 #include "ql4_glbl.h"
18 #include "ql4_dbg.h"
19 #include "ql4_inline.h"
21 /*
22  * Driver version
23  */
24 static char qla4xxx_version_str[40];
26 /*
27  * SRB allocation cache
28  */
29 static struct kmem_cache *srb_cachep;
31 /*
32  * Module parameter information and variables
33  */
34 int ql4xdontresethba = 0;
35 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
36 MODULE_PARM_DESC(ql4xdontresethba,
37 "Don't reset the HBA for driver recovery \n"
38 " 0 - It will reset HBA (Default)\n"
39 " 1 - It will NOT reset HBA");
41 int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
42 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
43 MODULE_PARM_DESC(ql4xextended_error_logging,
44 "Option to enable extended error logging, "
45 "Default is 0 - no logging, 1 - debug logging");
47 int ql4xenablemsix = 1;
48 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49 MODULE_PARM_DESC(ql4xenablemsix,
50 "Set to enable MSI or MSI-X interrupt mechanism.\n"
51 " 0 = enable INTx interrupt mechanism.\n"
52 " 1 = enable MSI-X interrupt mechanism (Default).\n"
53 " 2 = enable MSI interrupt mechanism.");
55 #define QL4_DEF_QDEPTH 32
56 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58 MODULE_PARM_DESC(ql4xmaxqdepth,
59 "Maximum queue depth to report for target devices.\n"
60 " Default: 32.");
62 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 "Target Session Recovery Timeout.\n"
66 " Default: 30 sec.");
68 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69 /*
70  * SCSI host template entry points
71  */
72 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
74 /*
75  * iSCSI template entry points
76  */
77 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf);
79 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 enum iscsi_host_param param, char *buf);
81 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
82 uint32_t len);
83 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 enum iscsi_param_type param_type,
85 int param, char *buf);
86 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 struct sockaddr *dst_addr,
89 int non_blocking);
90 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 enum iscsi_param param, char *buf);
94 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95 static struct iscsi_cls_conn *
96 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 struct iscsi_cls_conn *cls_conn,
99 uint64_t transport_fd, int is_leading);
100 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101 static struct iscsi_cls_session *
102 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 uint16_t qdepth, uint32_t initial_cmdsn);
104 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105 static void qla4xxx_task_work(struct work_struct *wdata);
106 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107 static int qla4xxx_task_xmit(struct iscsi_task *);
108 static void qla4xxx_task_cleanup(struct iscsi_task *);
109 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 struct iscsi_stats *stats);
112 /*
113  * SCSI host template entry points
114  */
115 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
116 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
117 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
118 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120 static int qla4xxx_slave_alloc(struct scsi_device *device);
121 static int qla4xxx_slave_configure(struct scsi_device *device);
122 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
123 static mode_t ql4_attr_is_visible(int param_type, int param);
124 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
126 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
127 QLA82XX_LEGACY_INTR_CONFIG;
129 static struct scsi_host_template qla4xxx_driver_template = {
130 .module = THIS_MODULE,
131 .name = DRIVER_NAME,
132 .proc_name = DRIVER_NAME,
133 .queuecommand = qla4xxx_queuecommand,
135 .eh_abort_handler = qla4xxx_eh_abort,
136 .eh_device_reset_handler = qla4xxx_eh_device_reset,
137 .eh_target_reset_handler = qla4xxx_eh_target_reset,
138 .eh_host_reset_handler = qla4xxx_eh_host_reset,
139 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
141 .slave_configure = qla4xxx_slave_configure,
142 .slave_alloc = qla4xxx_slave_alloc,
143 .slave_destroy = qla4xxx_slave_destroy,
145 .this_id = -1,
146 .cmd_per_lun = 3,
147 .use_clustering = ENABLE_CLUSTERING,
148 .sg_tablesize = SG_ALL,
150 .max_sectors = 0xFFFF,
151 .shost_attrs = qla4xxx_host_attrs,
152 .host_reset = qla4xxx_host_reset,
153 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
156 static struct iscsi_transport qla4xxx_iscsi_transport = {
157 .owner = THIS_MODULE,
158 .name = DRIVER_NAME,
159 .caps = CAP_TEXT_NEGO |
160 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
161 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
162 CAP_MULTI_R2T,
163 .attr_is_visible = ql4_attr_is_visible,
164 .create_session = qla4xxx_session_create,
165 .destroy_session = qla4xxx_session_destroy,
166 .start_conn = qla4xxx_conn_start,
167 .create_conn = qla4xxx_conn_create,
168 .bind_conn = qla4xxx_conn_bind,
169 .stop_conn = iscsi_conn_stop,
170 .destroy_conn = qla4xxx_conn_destroy,
171 .set_param = iscsi_set_param,
172 .get_conn_param = qla4xxx_conn_get_param,
173 .get_session_param = iscsi_session_get_param,
174 .get_ep_param = qla4xxx_get_ep_param,
175 .ep_connect = qla4xxx_ep_connect,
176 .ep_poll = qla4xxx_ep_poll,
177 .ep_disconnect = qla4xxx_ep_disconnect,
178 .get_stats = qla4xxx_conn_get_stats,
179 .send_pdu = iscsi_conn_send_pdu,
180 .xmit_task = qla4xxx_task_xmit,
181 .cleanup_task = qla4xxx_task_cleanup,
182 .alloc_pdu = qla4xxx_alloc_pdu,
184 .get_host_param = qla4xxx_host_get_param,
185 .set_iface_param = qla4xxx_iface_set_param,
186 .get_iface_param = qla4xxx_get_iface_param,
187 .bsg_request = qla4xxx_bsg_request,
190 static struct scsi_transport_template *qla4xxx_scsi_transport;
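/* Only the host, session/connection and network parameters listed below are
 * exposed (read-only, S_IRUGO) through the iscsi transport sysfs attributes;
 * everything else stays hidden for this driver.
 */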
192 static mode_t ql4_attr_is_visible(int param_type, int param)
194 switch (param_type) {
195 case ISCSI_HOST_PARAM:
196 switch (param) {
197 case ISCSI_HOST_PARAM_HWADDRESS:
198 case ISCSI_HOST_PARAM_IPADDRESS:
199 case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 return S_IRUGO;
201 default:
202 return 0;
204 case ISCSI_PARAM:
205 switch (param) {
206 case ISCSI_PARAM_CONN_ADDRESS:
207 case ISCSI_PARAM_CONN_PORT:
208 case ISCSI_PARAM_TARGET_NAME:
209 case ISCSI_PARAM_TPGT:
210 case ISCSI_PARAM_TARGET_ALIAS:
211 case ISCSI_PARAM_MAX_BURST:
212 case ISCSI_PARAM_MAX_R2T:
213 case ISCSI_PARAM_FIRST_BURST:
214 case ISCSI_PARAM_MAX_RECV_DLENGTH:
215 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
216 case ISCSI_PARAM_IFACE_NAME:
217 return S_IRUGO;
218 default:
219 return 0;
221 case ISCSI_NET_PARAM:
222 switch (param) {
223 case ISCSI_NET_PARAM_IPV4_ADDR:
224 case ISCSI_NET_PARAM_IPV4_SUBNET:
225 case ISCSI_NET_PARAM_IPV4_GW:
226 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
227 case ISCSI_NET_PARAM_IFACE_ENABLE:
228 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
229 case ISCSI_NET_PARAM_IPV6_ADDR:
230 case ISCSI_NET_PARAM_IPV6_ROUTER:
231 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
232 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
233 case ISCSI_NET_PARAM_VLAN_ID:
234 case ISCSI_NET_PARAM_VLAN_PRIORITY:
235 case ISCSI_NET_PARAM_VLAN_ENABLED:
236 case ISCSI_NET_PARAM_MTU:
237 case ISCSI_NET_PARAM_PORT:
238 return S_IRUGO;
239 default:
240 return 0;
244 return 0;
247 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
248 enum iscsi_param_type param_type,
249 int param, char *buf)
251 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
252 struct scsi_qla_host *ha = to_qla_host(shost);
253 int len = -ENOSYS;
255 if (param_type != ISCSI_NET_PARAM)
256 return -ENOSYS;
258 switch (param) {
259 case ISCSI_NET_PARAM_IPV4_ADDR:
260 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
261 break;
262 case ISCSI_NET_PARAM_IPV4_SUBNET:
263 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
264 break;
265 case ISCSI_NET_PARAM_IPV4_GW:
266 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
267 break;
268 case ISCSI_NET_PARAM_IFACE_ENABLE:
269 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
270 len = sprintf(buf, "%s\n",
271 (ha->ip_config.ipv4_options &
272 IPOPT_IPV4_PROTOCOL_ENABLE) ?
273 "enabled" : "disabled");
274 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
275 len = sprintf(buf, "%s\n",
276 (ha->ip_config.ipv6_options &
277 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
278 "enabled" : "disabled");
279 break;
280 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
281 len = sprintf(buf, "%s\n",
282 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
283 "dhcp" : "static");
284 break;
285 case ISCSI_NET_PARAM_IPV6_ADDR:
286 if (iface->iface_num == 0)
287 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
288 if (iface->iface_num == 1)
289 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
290 break;
291 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
292 len = sprintf(buf, "%pI6\n",
293 &ha->ip_config.ipv6_link_local_addr);
294 break;
295 case ISCSI_NET_PARAM_IPV6_ROUTER:
296 len = sprintf(buf, "%pI6\n",
297 &ha->ip_config.ipv6_default_router_addr);
298 break;
299 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
300 len = sprintf(buf, "%s\n",
301 (ha->ip_config.ipv6_addl_options &
302 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
303 "nd" : "static");
304 break;
305 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
306 len = sprintf(buf, "%s\n",
307 (ha->ip_config.ipv6_addl_options &
308 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
309 "auto" : "static");
310 break;
311 case ISCSI_NET_PARAM_VLAN_ID:
312 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
313 len = sprintf(buf, "%d\n",
314 (ha->ip_config.ipv4_vlan_tag &
315 ISCSI_MAX_VLAN_ID));
316 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
317 len = sprintf(buf, "%d\n",
318 (ha->ip_config.ipv6_vlan_tag &
319 ISCSI_MAX_VLAN_ID));
320 break;
321 case ISCSI_NET_PARAM_VLAN_PRIORITY:
322 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
323 len = sprintf(buf, "%d\n",
324 ((ha->ip_config.ipv4_vlan_tag >> 13) &
325 ISCSI_MAX_VLAN_PRIORITY));
326 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
327 len = sprintf(buf, "%d\n",
328 ((ha->ip_config.ipv6_vlan_tag >> 13) &
329 ISCSI_MAX_VLAN_PRIORITY));
330 break;
331 case ISCSI_NET_PARAM_VLAN_ENABLED:
332 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
333 len = sprintf(buf, "%s\n",
334 (ha->ip_config.ipv4_options &
335 IPOPT_VLAN_TAGGING_ENABLE) ?
336 "enabled" : "disabled");
337 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
338 len = sprintf(buf, "%s\n",
339 (ha->ip_config.ipv6_options &
340 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
341 "enabled" : "disabled");
342 break;
343 case ISCSI_NET_PARAM_MTU:
344 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
345 break;
346 case ISCSI_NET_PARAM_PORT:
347 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
348 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
349 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
350 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
351 break;
352 default:
353 len = -ENOSYS;
356 return len;
359 static struct iscsi_endpoint *
360 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
361 int non_blocking)
363 int ret;
364 struct iscsi_endpoint *ep;
365 struct qla_endpoint *qla_ep;
366 struct scsi_qla_host *ha;
367 struct sockaddr_in *addr;
368 struct sockaddr_in6 *addr6;
370 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
371 if (!shost) {
372 ret = -ENXIO;
373 printk(KERN_ERR "%s: shost is NULL\n",
374 __func__);
375 return ERR_PTR(ret);
378 ha = iscsi_host_priv(shost);
380 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
381 if (!ep) {
382 ret = -ENOMEM;
383 return ERR_PTR(ret);
386 qla_ep = ep->dd_data;
387 memset(qla_ep, 0, sizeof(struct qla_endpoint));
388 if (dst_addr->sa_family == AF_INET) {
389 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
390 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
391 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
392 (char *)&addr->sin_addr));
393 } else if (dst_addr->sa_family == AF_INET6) {
394 memcpy(&qla_ep->dst_addr, dst_addr,
395 sizeof(struct sockaddr_in6));
396 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
397 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
398 (char *)&addr6->sin6_addr));
401 qla_ep->host = shost;
403 return ep;
406 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
408 struct qla_endpoint *qla_ep;
409 struct scsi_qla_host *ha;
410 int ret = 0;
412 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
413 qla_ep = ep->dd_data;
414 ha = to_qla_host(qla_ep->host);
416 if (adapter_up(ha))
417 ret = 1;
419 return ret;
422 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
424 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
425 iscsi_destroy_endpoint(ep);
428 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
429 enum iscsi_param param,
430 char *buf)
432 struct qla_endpoint *qla_ep = ep->dd_data;
433 struct sockaddr *dst_addr;
435 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
437 switch (param) {
438 case ISCSI_PARAM_CONN_PORT:
439 case ISCSI_PARAM_CONN_ADDRESS:
440 if (!qla_ep)
441 return -ENOTCONN;
443 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
444 if (!dst_addr)
445 return -ENOTCONN;
447 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
448 &qla_ep->dst_addr, param, buf);
449 default:
450 return -ENOSYS;
454 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
455 struct iscsi_stats *stats)
457 struct iscsi_session *sess;
458 struct iscsi_cls_session *cls_sess;
459 struct ddb_entry *ddb_entry;
460 struct scsi_qla_host *ha;
461 struct ql_iscsi_stats *ql_iscsi_stats;
462 int stats_size;
463 int ret;
464 dma_addr_t iscsi_stats_dma;
466 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
468 cls_sess = iscsi_conn_to_session(cls_conn);
469 sess = cls_sess->dd_data;
470 ddb_entry = sess->dd_data;
471 ha = ddb_entry->ha;
473 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
474 /* Allocate memory */
475 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
476 &iscsi_stats_dma, GFP_KERNEL);
477 if (!ql_iscsi_stats) {
478 ql4_printk(KERN_ERR, ha,
479 "Unable to allocate memory for iscsi stats\n");
480 goto exit_get_stats;
483 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
484 iscsi_stats_dma);
485 if (ret != QLA_SUCCESS) {
486 ql4_printk(KERN_ERR, ha,
487 "Unable to retreive iscsi stats\n");
488 goto free_stats;
491 /* octets */
492 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
493 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
494 /* xmit pdus */
495 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
496 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
497 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
498 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
499 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
500 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
501 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
502 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
503 /* recv pdus */
504 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
505 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
506 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
507 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
508 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
509 stats->logoutrsp_pdus =
510 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
511 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
512 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
513 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
515 free_stats:
516 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
517 iscsi_stats_dma);
518 exit_get_stats:
519 return;
522 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
524 struct iscsi_cls_session *session;
525 struct iscsi_session *sess;
526 unsigned long flags;
527 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
529 session = starget_to_session(scsi_target(sc->device));
530 sess = session->dd_data;
532 spin_lock_irqsave(&session->lock, flags);
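/* While the session is in the FAILED state (recovery in progress), keep
 * restarting the block layer timer so the command is not escalated to the
 * SCSI error handler.
 */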
533 if (session->state == ISCSI_SESSION_FAILED)
534 ret = BLK_EH_RESET_TIMER;
535 spin_unlock_irqrestore(&session->lock, flags);
537 return ret;
540 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
541 enum iscsi_host_param param, char *buf)
543 struct scsi_qla_host *ha = to_qla_host(shost);
544 int len;
546 switch (param) {
547 case ISCSI_HOST_PARAM_HWADDRESS:
548 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
549 break;
550 case ISCSI_HOST_PARAM_IPADDRESS:
551 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
552 break;
553 case ISCSI_HOST_PARAM_INITIATOR_NAME:
554 len = sprintf(buf, "%s\n", ha->name_string);
555 break;
556 default:
557 return -ENOSYS;
560 return len;
563 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
565 if (ha->iface_ipv4)
566 return;
568 /* IPv4 */
569 ha->iface_ipv4 = iscsi_create_iface(ha->host,
570 &qla4xxx_iscsi_transport,
571 ISCSI_IFACE_TYPE_IPV4, 0, 0);
572 if (!ha->iface_ipv4)
573 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
574 "iface0.\n");
577 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
579 if (!ha->iface_ipv6_0)
580 /* IPv6 iface-0 */
581 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
582 &qla4xxx_iscsi_transport,
583 ISCSI_IFACE_TYPE_IPV6, 0,
585 if (!ha->iface_ipv6_0)
586 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 "iface0.\n");
589 if (!ha->iface_ipv6_1)
590 /* IPv6 iface-1 */
591 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
592 &qla4xxx_iscsi_transport,
593 ISCSI_IFACE_TYPE_IPV6, 1,
595 if (!ha->iface_ipv6_1)
596 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
597 "iface1.\n");
600 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
602 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
603 qla4xxx_create_ipv4_iface(ha);
605 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
606 qla4xxx_create_ipv6_iface(ha);
609 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
611 if (ha->iface_ipv4) {
612 iscsi_destroy_iface(ha->iface_ipv4);
613 ha->iface_ipv4 = NULL;
617 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
619 if (ha->iface_ipv6_0) {
620 iscsi_destroy_iface(ha->iface_ipv6_0);
621 ha->iface_ipv6_0 = NULL;
623 if (ha->iface_ipv6_1) {
624 iscsi_destroy_iface(ha->iface_ipv6_1);
625 ha->iface_ipv6_1 = NULL;
629 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
631 qla4xxx_destroy_ipv4_iface(ha);
632 qla4xxx_destroy_ipv6_iface(ha);
635 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
636 struct iscsi_iface_param_info *iface_param,
637 struct addr_ctrl_blk *init_fw_cb)
640 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
641 * iface_num 1 is valid only for IPv6 Addr.
643 switch (iface_param->param) {
644 case ISCSI_NET_PARAM_IPV6_ADDR:
645 if (iface_param->iface_num & 0x1)
646 /* IPv6 Addr 1 */
647 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
648 sizeof(init_fw_cb->ipv6_addr1));
649 else
650 /* IPv6 Addr 0 */
651 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
652 sizeof(init_fw_cb->ipv6_addr0));
653 break;
654 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
655 if (iface_param->iface_num & 0x1)
656 break;
657 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
658 sizeof(init_fw_cb->ipv6_if_id));
659 break;
660 case ISCSI_NET_PARAM_IPV6_ROUTER:
661 if (iface_param->iface_num & 0x1)
662 break;
663 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
664 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
665 break;
666 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
667 /* Autocfg applies to even interface */
668 if (iface_param->iface_num & 0x1)
669 break;
671 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
672 init_fw_cb->ipv6_addtl_opts &=
673 cpu_to_le16(
674 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
675 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
676 init_fw_cb->ipv6_addtl_opts |=
677 cpu_to_le16(
678 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
679 else
680 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
681 "IPv6 addr\n");
682 break;
683 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
684 /* Autocfg applies to even interface */
685 if (iface_param->iface_num & 0x1)
686 break;
688 if (iface_param->value[0] ==
689 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
690 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
691 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
692 else if (iface_param->value[0] ==
693 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
694 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
695 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
696 else
697 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
698 "IPv6 linklocal addr\n");
699 break;
700 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
701 /* Autocfg applies to even interface */
702 if (iface_param->iface_num & 0x1)
703 break;
705 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
706 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
707 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
708 break;
709 case ISCSI_NET_PARAM_IFACE_ENABLE:
710 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
711 init_fw_cb->ipv6_opts |=
712 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
713 qla4xxx_create_ipv6_iface(ha);
714 } else {
715 init_fw_cb->ipv6_opts &=
716 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
717 0xFFFF);
718 qla4xxx_destroy_ipv6_iface(ha);
720 break;
721 case ISCSI_NET_PARAM_VLAN_TAG:
722 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
723 break;
724 init_fw_cb->ipv6_vlan_tag =
725 cpu_to_be16(*(uint16_t *)iface_param->value);
726 break;
727 case ISCSI_NET_PARAM_VLAN_ENABLED:
728 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
729 init_fw_cb->ipv6_opts |=
730 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
731 else
732 init_fw_cb->ipv6_opts &=
733 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
734 break;
735 case ISCSI_NET_PARAM_MTU:
736 init_fw_cb->eth_mtu_size =
737 cpu_to_le16(*(uint16_t *)iface_param->value);
738 break;
739 case ISCSI_NET_PARAM_PORT:
740 /* Autocfg applies to even interface */
741 if (iface_param->iface_num & 0x1)
742 break;
744 init_fw_cb->ipv6_port =
745 cpu_to_le16(*(uint16_t *)iface_param->value);
746 break;
747 default:
748 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
749 iface_param->param);
750 break;
754 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
755 struct iscsi_iface_param_info *iface_param,
756 struct addr_ctrl_blk *init_fw_cb)
758 switch (iface_param->param) {
759 case ISCSI_NET_PARAM_IPV4_ADDR:
760 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
761 sizeof(init_fw_cb->ipv4_addr));
762 break;
763 case ISCSI_NET_PARAM_IPV4_SUBNET:
764 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
765 sizeof(init_fw_cb->ipv4_subnet));
766 break;
767 case ISCSI_NET_PARAM_IPV4_GW:
768 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
769 sizeof(init_fw_cb->ipv4_gw_addr));
770 break;
771 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
772 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
773 init_fw_cb->ipv4_tcp_opts |=
774 cpu_to_le16(TCPOPT_DHCP_ENABLE);
775 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
776 init_fw_cb->ipv4_tcp_opts &=
777 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
778 else
779 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
780 break;
781 case ISCSI_NET_PARAM_IFACE_ENABLE:
782 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
783 init_fw_cb->ipv4_ip_opts |=
784 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
785 qla4xxx_create_ipv4_iface(ha);
786 } else {
787 init_fw_cb->ipv4_ip_opts &=
788 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
789 0xFFFF);
790 qla4xxx_destroy_ipv4_iface(ha);
792 break;
793 case ISCSI_NET_PARAM_VLAN_TAG:
794 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
795 break;
796 init_fw_cb->ipv4_vlan_tag =
797 cpu_to_be16(*(uint16_t *)iface_param->value);
798 break;
799 case ISCSI_NET_PARAM_VLAN_ENABLED:
800 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
801 init_fw_cb->ipv4_ip_opts |=
802 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
803 else
804 init_fw_cb->ipv4_ip_opts &=
805 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
806 break;
807 case ISCSI_NET_PARAM_MTU:
808 init_fw_cb->eth_mtu_size =
809 cpu_to_le16(*(uint16_t *)iface_param->value);
810 break;
811 case ISCSI_NET_PARAM_PORT:
812 init_fw_cb->ipv4_port =
813 cpu_to_le16(*(uint16_t *)iface_param->value);
814 break;
815 default:
816 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
817 iface_param->param);
818 break;
822 static void
823 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
825 struct addr_ctrl_blk_def *acb;
826 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
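/* Clear every reserved field of the ACB view of the init control block
 * before it is handed to the firmware, so no stale data is written back.
 */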
827 memset(acb->reserved1, 0, sizeof(acb->reserved1));
828 memset(acb->reserved2, 0, sizeof(acb->reserved2));
829 memset(acb->reserved3, 0, sizeof(acb->reserved3));
830 memset(acb->reserved4, 0, sizeof(acb->reserved4));
831 memset(acb->reserved5, 0, sizeof(acb->reserved5));
832 memset(acb->reserved6, 0, sizeof(acb->reserved6));
833 memset(acb->reserved7, 0, sizeof(acb->reserved7));
834 memset(acb->reserved8, 0, sizeof(acb->reserved8));
835 memset(acb->reserved9, 0, sizeof(acb->reserved9));
836 memset(acb->reserved10, 0, sizeof(acb->reserved10));
837 memset(acb->reserved11, 0, sizeof(acb->reserved11));
838 memset(acb->reserved12, 0, sizeof(acb->reserved12));
839 memset(acb->reserved13, 0, sizeof(acb->reserved13));
840 memset(acb->reserved14, 0, sizeof(acb->reserved14));
841 memset(acb->reserved15, 0, sizeof(acb->reserved15));
844 static int
845 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
847 struct scsi_qla_host *ha = to_qla_host(shost);
848 int rval = 0;
849 struct iscsi_iface_param_info *iface_param = NULL;
850 struct addr_ctrl_blk *init_fw_cb = NULL;
851 dma_addr_t init_fw_cb_dma;
852 uint32_t mbox_cmd[MBOX_REG_COUNT];
853 uint32_t mbox_sts[MBOX_REG_COUNT];
854 uint32_t rem = len;
855 struct nlattr *attr;
857 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
858 sizeof(struct addr_ctrl_blk),
859 &init_fw_cb_dma, GFP_KERNEL);
860 if (!init_fw_cb) {
861 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
862 __func__);
863 return -ENOMEM;
866 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
867 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
868 memset(&mbox_sts, 0, sizeof(mbox_sts));
870 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
871 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
872 rval = -EIO;
873 goto exit_init_fw_cb;
876 nla_for_each_attr(attr, data, len, rem) {
877 iface_param = nla_data(attr);
879 if (iface_param->param_type != ISCSI_NET_PARAM)
880 continue;
882 switch (iface_param->iface_type) {
883 case ISCSI_IFACE_TYPE_IPV4:
884 switch (iface_param->iface_num) {
885 case 0:
886 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
887 break;
888 default:
889 /* Cannot have more than one IPv4 interface */
890 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
891 "number = %d\n",
892 iface_param->iface_num);
893 break;
895 break;
896 case ISCSI_IFACE_TYPE_IPV6:
897 switch (iface_param->iface_num) {
898 case 0:
899 case 1:
900 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
901 break;
902 default:
903 /* Cannot have more than two IPv6 interfaces */
904 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
905 "number = %d\n",
906 iface_param->iface_num);
907 break;
909 break;
910 default:
911 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
912 break;
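/* Mark the updated control block with the IFCB validity cookie before it is
 * committed to flash and applied (assumption: the firmware checks this magic
 * value, 0x11BEAD5A, to accept the block).
 */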
916 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
918 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
919 sizeof(struct addr_ctrl_blk),
920 FLASH_OPT_RMW_COMMIT);
921 if (rval != QLA_SUCCESS) {
922 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
923 __func__);
924 rval = -EIO;
925 goto exit_init_fw_cb;
928 qla4xxx_disable_acb(ha);
930 qla4xxx_initcb_to_acb(init_fw_cb);
932 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
933 if (rval != QLA_SUCCESS) {
934 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
935 __func__);
936 rval = -EIO;
937 goto exit_init_fw_cb;
940 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
941 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
942 init_fw_cb_dma);
944 exit_init_fw_cb:
945 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
946 init_fw_cb, init_fw_cb_dma);
948 return rval;
951 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
952 enum iscsi_param param, char *buf)
954 struct iscsi_conn *conn;
955 struct qla_conn *qla_conn;
956 struct sockaddr *dst_addr;
957 int len = 0;
959 conn = cls_conn->dd_data;
960 qla_conn = conn->dd_data;
961 dst_addr = &qla_conn->qla_ep->dst_addr;
963 switch (param) {
964 case ISCSI_PARAM_CONN_PORT:
965 case ISCSI_PARAM_CONN_ADDRESS:
966 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
967 dst_addr, param, buf);
968 default:
969 return iscsi_conn_get_param(cls_conn, param, buf);
972 return len;
976 static struct iscsi_cls_session *
977 qla4xxx_session_create(struct iscsi_endpoint *ep,
978 uint16_t cmds_max, uint16_t qdepth,
979 uint32_t initial_cmdsn)
981 struct iscsi_cls_session *cls_sess;
982 struct scsi_qla_host *ha;
983 struct qla_endpoint *qla_ep;
984 struct ddb_entry *ddb_entry;
985 uint32_t ddb_index;
986 uint32_t mbx_sts = 0;
987 struct iscsi_session *sess;
988 struct sockaddr *dst_addr;
989 int ret;
991 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
992 if (!ep) {
993 printk(KERN_ERR "qla4xxx: missing ep.\n");
994 return NULL;
997 qla_ep = ep->dd_data;
998 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
999 ha = to_qla_host(qla_ep->host);
1001 get_ddb_index:
1002 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1004 if (ddb_index >= MAX_DDB_ENTRIES) {
1005 DEBUG2(ql4_printk(KERN_INFO, ha,
1006 "Free DDB index not available\n"));
1007 return NULL;
1010 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1011 goto get_ddb_index;
1013 DEBUG2(ql4_printk(KERN_INFO, ha,
1014 "Found a free DDB index at %d\n", ddb_index));
1015 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1016 if (ret == QLA_ERROR) {
1017 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1018 ql4_printk(KERN_INFO, ha,
1019 "DDB index = %d not available trying next\n",
1020 ddb_index);
1021 goto get_ddb_index;
1023 DEBUG2(ql4_printk(KERN_INFO, ha,
1024 "Free FW DDB not available\n"));
1025 return NULL;
1028 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1029 cmds_max, sizeof(struct ddb_entry),
1030 sizeof(struct ql4_task_data),
1031 initial_cmdsn, ddb_index);
1032 if (!cls_sess)
1033 return NULL;
1035 sess = cls_sess->dd_data;
1036 ddb_entry = sess->dd_data;
1037 ddb_entry->fw_ddb_index = ddb_index;
1038 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1039 ddb_entry->ha = ha;
1040 ddb_entry->sess = cls_sess;
1041 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1042 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1043 ha->tot_ddbs++;
1045 return cls_sess;
1048 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1050 struct iscsi_session *sess;
1051 struct ddb_entry *ddb_entry;
1052 struct scsi_qla_host *ha;
1053 unsigned long flags;
1055 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1056 sess = cls_sess->dd_data;
1057 ddb_entry = sess->dd_data;
1058 ha = ddb_entry->ha;
1060 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1062 spin_lock_irqsave(&ha->hardware_lock, flags);
1063 qla4xxx_free_ddb(ha, ddb_entry);
1064 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1065 iscsi_session_teardown(cls_sess);
1068 static struct iscsi_cls_conn *
1069 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1071 struct iscsi_cls_conn *cls_conn;
1072 struct iscsi_session *sess;
1073 struct ddb_entry *ddb_entry;
1075 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1076 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1077 conn_idx);
1078 sess = cls_sess->dd_data;
1079 ddb_entry = sess->dd_data;
1080 ddb_entry->conn = cls_conn;
1082 return cls_conn;
1085 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1086 struct iscsi_cls_conn *cls_conn,
1087 uint64_t transport_fd, int is_leading)
1089 struct iscsi_conn *conn;
1090 struct qla_conn *qla_conn;
1091 struct iscsi_endpoint *ep;
1093 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1095 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1096 return -EINVAL;
1097 ep = iscsi_lookup_endpoint(transport_fd);
1098 conn = cls_conn->dd_data;
1099 qla_conn = conn->dd_data;
1100 qla_conn->qla_ep = ep->dd_data;
1101 return 0;
1104 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1106 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1107 struct iscsi_session *sess;
1108 struct ddb_entry *ddb_entry;
1109 struct scsi_qla_host *ha;
1110 struct dev_db_entry *fw_ddb_entry;
1111 dma_addr_t fw_ddb_entry_dma;
1112 uint32_t mbx_sts = 0;
1113 int ret = 0;
1114 int status = QLA_SUCCESS;
1116 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1117 sess = cls_sess->dd_data;
1118 ddb_entry = sess->dd_data;
1119 ha = ddb_entry->ha;
1121 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1122 &fw_ddb_entry_dma, GFP_KERNEL);
1123 if (!fw_ddb_entry) {
1124 ql4_printk(KERN_ERR, ha,
1125 "%s: Unable to allocate dma buffer\n", __func__);
1126 return -ENOMEM;
1129 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1130 if (ret) {
1131 /* If iscsid is stopped and started then there is no need to
1132 * set the params again, since the ddb state will already be
1133 * active and the FW does not allow setting a ddb on an
1134 * active session.
1136 if (mbx_sts)
1137 if (ddb_entry->fw_ddb_device_state ==
1138 DDB_DS_SESSION_ACTIVE) {
1139 iscsi_conn_start(ddb_entry->conn);
1140 iscsi_conn_login_event(ddb_entry->conn,
1141 ISCSI_CONN_STATE_LOGGED_IN);
1142 goto exit_set_param;
1145 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1146 __func__, ddb_entry->fw_ddb_index);
1147 goto exit_conn_start;
1150 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1151 if (status == QLA_ERROR) {
1152 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1153 sess->targetname);
1154 ret = -EINVAL;
1155 goto exit_conn_start;
1158 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1159 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1161 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1162 ddb_entry->fw_ddb_device_state));
1164 exit_set_param:
1165 ret = 0;
1167 exit_conn_start:
1168 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1169 fw_ddb_entry, fw_ddb_entry_dma);
1170 return ret;
1173 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1175 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1176 struct iscsi_session *sess;
1177 struct scsi_qla_host *ha;
1178 struct ddb_entry *ddb_entry;
1179 int options;
1181 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1182 sess = cls_sess->dd_data;
1183 ddb_entry = sess->dd_data;
1184 ha = ddb_entry->ha;
1186 options = LOGOUT_OPTION_CLOSE_SESSION;
1187 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1188 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1191 static void qla4xxx_task_work(struct work_struct *wdata)
1193 struct ql4_task_data *task_data;
1194 struct scsi_qla_host *ha;
1195 struct passthru_status *sts;
1196 struct iscsi_task *task;
1197 struct iscsi_hdr *hdr;
1198 uint8_t *data;
1199 uint32_t data_len;
1200 struct iscsi_conn *conn;
1201 int hdr_len;
1202 itt_t itt;
1204 task_data = container_of(wdata, struct ql4_task_data, task_work);
1205 ha = task_data->ha;
1206 task = task_data->task;
1207 sts = &task_data->sts;
1208 hdr_len = sizeof(struct iscsi_hdr);
1210 DEBUG3(printk(KERN_INFO "Status returned\n"));
1211 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1212 DEBUG3(printk(KERN_INFO "Response buffer"));
1213 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1215 conn = task->conn;
1217 switch (sts->completionStatus) {
1218 case PASSTHRU_STATUS_COMPLETE:
1219 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1220 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1221 itt = sts->handle;
1222 hdr->itt = itt;
1223 data = task_data->resp_buffer + hdr_len;
1224 data_len = task_data->resp_len - hdr_len;
1225 iscsi_complete_pdu(conn, hdr, data, data_len);
1226 break;
1227 default:
1228 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1229 sts->completionStatus);
1230 break;
1232 return;
1235 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1237 struct ql4_task_data *task_data;
1238 struct iscsi_session *sess;
1239 struct ddb_entry *ddb_entry;
1240 struct scsi_qla_host *ha;
1241 int hdr_len;
1243 sess = task->conn->session;
1244 ddb_entry = sess->dd_data;
1245 ha = ddb_entry->ha;
1246 task_data = task->dd_data;
1247 memset(task_data, 0, sizeof(struct ql4_task_data));
1249 if (task->sc) {
1250 ql4_printk(KERN_INFO, ha,
1251 "%s: SCSI Commands not implemented\n", __func__);
1252 return -EINVAL;
1255 hdr_len = sizeof(struct iscsi_hdr);
1256 task_data->ha = ha;
1257 task_data->task = task;
1259 if (task->data_count) {
1260 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1261 task->data_count,
1262 PCI_DMA_TODEVICE);
1265 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1266 __func__, task->conn->max_recv_dlength, hdr_len));
1268 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1269 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1270 task_data->resp_len,
1271 &task_data->resp_dma,
1272 GFP_ATOMIC);
1273 if (!task_data->resp_buffer)
1274 goto exit_alloc_pdu;
1276 task_data->req_len = task->data_count + hdr_len;
1277 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1278 task_data->req_len,
1279 &task_data->req_dma,
1280 GFP_ATOMIC);
1281 if (!task_data->req_buffer)
1282 goto exit_alloc_pdu;
1284 task->hdr = task_data->req_buffer;
1286 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1288 return 0;
1290 exit_alloc_pdu:
1291 if (task_data->resp_buffer)
1292 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1293 task_data->resp_buffer, task_data->resp_dma);
1295 if (task_data->req_buffer)
1296 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1297 task_data->req_buffer, task_data->req_dma);
1298 return -ENOMEM;
1301 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1303 struct ql4_task_data *task_data;
1304 struct iscsi_session *sess;
1305 struct ddb_entry *ddb_entry;
1306 struct scsi_qla_host *ha;
1307 int hdr_len;
1309 hdr_len = sizeof(struct iscsi_hdr);
1310 sess = task->conn->session;
1311 ddb_entry = sess->dd_data;
1312 ha = ddb_entry->ha;
1313 task_data = task->dd_data;
1315 if (task->data_count) {
1316 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1317 task->data_count, PCI_DMA_TODEVICE);
1320 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1321 __func__, task->conn->max_recv_dlength, hdr_len));
1323 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1324 task_data->resp_buffer, task_data->resp_dma);
1325 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1326 task_data->req_buffer, task_data->req_dma);
1327 return;
1330 static int qla4xxx_task_xmit(struct iscsi_task *task)
1332 struct scsi_cmnd *sc = task->sc;
1333 struct iscsi_session *sess = task->conn->session;
1334 struct ddb_entry *ddb_entry = sess->dd_data;
1335 struct scsi_qla_host *ha = ddb_entry->ha;
1337 if (!sc)
1338 return qla4xxx_send_passthru0(task);
1340 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1341 __func__);
1342 return -ENOSYS;
1345 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1346 struct ddb_entry *ddb_entry)
1348 struct iscsi_cls_session *cls_sess;
1349 struct iscsi_cls_conn *cls_conn;
1350 struct iscsi_session *sess;
1351 struct iscsi_conn *conn;
1352 uint32_t ddb_state;
1353 dma_addr_t fw_ddb_entry_dma;
1354 struct dev_db_entry *fw_ddb_entry;
1356 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1357 &fw_ddb_entry_dma, GFP_KERNEL);
1358 if (!fw_ddb_entry) {
1359 ql4_printk(KERN_ERR, ha,
1360 "%s: Unable to allocate dma buffer\n", __func__);
1361 return;
1364 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1365 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1366 NULL, NULL, NULL) == QLA_ERROR) {
1367 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1368 "get_ddb_entry for fw_ddb_index %d\n",
1369 ha->host_no, __func__,
1370 ddb_entry->fw_ddb_index));
1371 return;
1374 cls_sess = ddb_entry->sess;
1375 sess = cls_sess->dd_data;
1377 cls_conn = ddb_entry->conn;
1378 conn = cls_conn->dd_data;
1380 /* Update params */
1381 conn->max_recv_dlength = BYTE_UNITS *
1382 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1384 conn->max_xmit_dlength = BYTE_UNITS *
1385 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1387 sess->initial_r2t_en =
1388 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1390 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1392 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1394 sess->first_burst = BYTE_UNITS *
1395 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1397 sess->max_burst = BYTE_UNITS *
1398 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1400 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1402 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1404 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1406 memcpy(sess->initiatorname, ha->name_string,
1407 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1411 * Timer routines
1414 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1415 unsigned long interval)
1417 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1418 __func__, ha->host->host_no));
1419 init_timer(&ha->timer);
1420 ha->timer.expires = jiffies + interval * HZ;
1421 ha->timer.data = (unsigned long)ha;
1422 ha->timer.function = (void (*)(unsigned long))func;
1423 add_timer(&ha->timer);
1424 ha->timer_active = 1;
1427 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1429 del_timer_sync(&ha->timer);
1430 ha->timer_active = 0;
1433 /***
1434 * qla4xxx_mark_device_missing - blocks the session
1435 * @cls_session: Pointer to the session to be blocked
1438 * This routine marks a device missing and closes the connection.
1440 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
1442 iscsi_block_session(cls_session);
1446 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1447 * @ha: Pointer to host adapter structure.
1449 * This routine marks a device missing and resets the relogin retry count.
1451 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1453 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1456 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1457 struct ddb_entry *ddb_entry,
1458 struct scsi_cmnd *cmd)
1460 struct srb *srb;
1462 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1463 if (!srb)
1464 return srb;
1466 kref_init(&srb->srb_ref);
1467 srb->ha = ha;
1468 srb->ddb = ddb_entry;
1469 srb->cmd = cmd;
1470 srb->flags = 0;
1471 CMD_SP(cmd) = (void *)srb;
1473 return srb;
1476 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1478 struct scsi_cmnd *cmd = srb->cmd;
1480 if (srb->flags & SRB_DMA_VALID) {
1481 scsi_dma_unmap(cmd);
1482 srb->flags &= ~SRB_DMA_VALID;
1484 CMD_SP(cmd) = NULL;
1487 void qla4xxx_srb_compl(struct kref *ref)
1489 struct srb *srb = container_of(ref, struct srb, srb_ref);
1490 struct scsi_cmnd *cmd = srb->cmd;
1491 struct scsi_qla_host *ha = srb->ha;
1493 qla4xxx_srb_free_dma(ha, srb);
1495 mempool_free(srb, ha->srb_mempool);
1497 cmd->scsi_done(cmd);
1501 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
1502 * @host: scsi host
1503 * @cmd: Pointer to Linux's SCSI command structure
1505 * Remarks:
1506 * This routine is invoked by Linux to send a SCSI command to the driver.
1507 * The mid-level driver tries to ensure that queuecommand never gets
1508 * invoked concurrently with itself or the interrupt handler (although
1509 * the interrupt handler may call this routine as part of request-
1510 * completion handling). Unfortunately, it sometimes calls the scheduler
1511 * in interrupt context which is a big NO! NO!.
1513 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1515 struct scsi_qla_host *ha = to_qla_host(host);
1516 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1517 struct iscsi_cls_session *sess = ddb_entry->sess;
1518 struct srb *srb;
1519 int rval;
1521 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1522 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1523 cmd->result = DID_NO_CONNECT << 16;
1524 else
1525 cmd->result = DID_REQUEUE << 16;
1526 goto qc_fail_command;
1529 if (!sess) {
1530 cmd->result = DID_IMM_RETRY << 16;
1531 goto qc_fail_command;
1534 rval = iscsi_session_chkready(sess);
1535 if (rval) {
1536 cmd->result = rval;
1537 goto qc_fail_command;
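/* While a reset or quiescence is pending, or the adapter/link is not up,
 * return SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues the command and
 * retries it later.
 */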
1540 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1541 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1542 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1543 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1544 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1545 !test_bit(AF_ONLINE, &ha->flags) ||
1546 !test_bit(AF_LINK_UP, &ha->flags) ||
1547 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1548 goto qc_host_busy;
1550 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
1551 if (!srb)
1552 goto qc_host_busy;
1554 rval = qla4xxx_send_command_to_isp(ha, srb);
1555 if (rval != QLA_SUCCESS)
1556 goto qc_host_busy_free_sp;
1558 return 0;
1560 qc_host_busy_free_sp:
1561 qla4xxx_srb_free_dma(ha, srb);
1562 mempool_free(srb, ha->srb_mempool);
1564 qc_host_busy:
1565 return SCSI_MLQUEUE_HOST_BUSY;
1567 qc_fail_command:
1568 cmd->scsi_done(cmd);
1570 return 0;
1574 * qla4xxx_mem_free - frees memory allocated to adapter
1575 * @ha: Pointer to host adapter structure.
1577 * Frees memory previously allocated by qla4xxx_mem_alloc
1579 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1581 if (ha->queues)
1582 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1583 ha->queues_dma);
1585 ha->queues_len = 0;
1586 ha->queues = NULL;
1587 ha->queues_dma = 0;
1588 ha->request_ring = NULL;
1589 ha->request_dma = 0;
1590 ha->response_ring = NULL;
1591 ha->response_dma = 0;
1592 ha->shadow_regs = NULL;
1593 ha->shadow_regs_dma = 0;
1595 /* Free srb pool. */
1596 if (ha->srb_mempool)
1597 mempool_destroy(ha->srb_mempool);
1599 ha->srb_mempool = NULL;
1601 if (ha->chap_dma_pool)
1602 dma_pool_destroy(ha->chap_dma_pool);
1604 if (ha->chap_list)
1605 vfree(ha->chap_list);
1606 ha->chap_list = NULL;
1608 /* release io space registers */
1609 if (is_qla8022(ha)) {
1610 if (ha->nx_pcibase)
1611 iounmap(
1612 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
1613 } else if (ha->reg)
1614 iounmap(ha->reg);
1615 pci_release_regions(ha->pdev);
1619 * qla4xxx_mem_alloc - allocates memory for use by adapter.
1620 * @ha: Pointer to host adapter structure
1622 * Allocates DMA memory for request and response queues. Also allocates memory
1623 * for srbs.
1625 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1627 unsigned long align;
1629 /* Allocate contiguous block of DMA memory for queues. */
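/* Room for the request ring, response ring and shadow registers, padded by
 * MEM_ALIGN_VALUE for the alignment fix-up below and rounded up to a whole
 * number of pages.
 */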
1630 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1631 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1632 sizeof(struct shadow_regs) +
1633 MEM_ALIGN_VALUE +
1634 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1635 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1636 &ha->queues_dma, GFP_KERNEL);
1637 if (ha->queues == NULL) {
1638 ql4_printk(KERN_WARNING, ha,
1639 "Memory Allocation failed - queues.\n");
1641 goto mem_alloc_error_exit;
1643 memset(ha->queues, 0, ha->queues_len);
1646 * As per RISC alignment requirements -- the bus-address must be a
1647 * multiple of the request-ring size (in bytes).
1649 align = 0;
1650 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1651 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1652 (MEM_ALIGN_VALUE - 1));
1654 /* Update request and response queue pointers. */
1655 ha->request_dma = ha->queues_dma + align;
1656 ha->request_ring = (struct queue_entry *) (ha->queues + align);
1657 ha->response_dma = ha->queues_dma + align +
1658 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1659 ha->response_ring = (struct queue_entry *) (ha->queues + align +
1660 (REQUEST_QUEUE_DEPTH *
1661 QUEUE_SIZE));
1662 ha->shadow_regs_dma = ha->queues_dma + align +
1663 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1664 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1665 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1666 (REQUEST_QUEUE_DEPTH *
1667 QUEUE_SIZE) +
1668 (RESPONSE_QUEUE_DEPTH *
1669 QUEUE_SIZE));
1671 /* Allocate memory for srb pool. */
1672 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1673 mempool_free_slab, srb_cachep);
1674 if (ha->srb_mempool == NULL) {
1675 ql4_printk(KERN_WARNING, ha,
1676 "Memory Allocation failed - SRB Pool.\n");
1678 goto mem_alloc_error_exit;
1681 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1682 CHAP_DMA_BLOCK_SIZE, 8, 0);
1684 if (ha->chap_dma_pool == NULL) {
1685 ql4_printk(KERN_WARNING, ha,
1686 "%s: chap_dma_pool allocation failed..\n", __func__);
1687 goto mem_alloc_error_exit;
1690 return QLA_SUCCESS;
1692 mem_alloc_error_exit:
1693 qla4xxx_mem_free(ha);
1694 return QLA_ERROR;
1698 * qla4_8xxx_check_fw_alive - Check firmware health
1699 * @ha: Pointer to host adapter structure.
1701 * Context: Interrupt
1703 static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1705 uint32_t fw_heartbeat_counter, halt_status;
1707 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1708 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1709 if (fw_heartbeat_counter == 0xffffffff) {
1710 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1711 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1712 ha->host_no, __func__));
1713 return;
1716 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1717 ha->seconds_since_last_heartbeat++;
1718 /* FW not alive after 2 seconds */
1719 if (ha->seconds_since_last_heartbeat == 2) {
1720 ha->seconds_since_last_heartbeat = 0;
1721 halt_status = qla4_8xxx_rd_32(ha,
1722 QLA82XX_PEG_HALT_STATUS1);
1724 ql4_printk(KERN_INFO, ha,
1725 "scsi(%ld): %s, Dumping hw/fw registers:\n "
1726 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1727 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1728 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1729 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1730 ha->host_no, __func__, halt_status,
1731 qla4_8xxx_rd_32(ha,
1732 QLA82XX_PEG_HALT_STATUS2),
1733 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1734 0x3c),
1735 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1736 0x3c),
1737 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1738 0x3c),
1739 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1740 0x3c),
1741 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1742 0x3c));
1744 /* Since we cannot change dev_state in interrupt
1745 * context, set appropriate DPC flag then wakeup
1746 * DPC */
1747 if (halt_status & HALT_STATUS_UNRECOVERABLE)
1748 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1749 else {
1750 printk("scsi%ld: %s: detect abort needed!\n",
1751 ha->host_no, __func__);
1752 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1754 qla4xxx_wake_dpc(ha);
1755 qla4xxx_mailbox_premature_completion(ha);
1757 } else
1758 ha->seconds_since_last_heartbeat = 0;
1760 ha->fw_heartbeat_counter = fw_heartbeat_counter;
1764 * qla4_8xxx_watchdog - Poll dev state
1765 * @ha: Pointer to host adapter structure.
1767 * Context: Interrupt
1769 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1771 uint32_t dev_state;
1773 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1775 /* don't poll if reset is going on */
1776 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1777 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1778 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
1779 if (dev_state == QLA82XX_DEV_NEED_RESET &&
1780 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1781 if (!ql4xdontresethba) {
1782 ql4_printk(KERN_INFO, ha, "%s: HW State: "
1783 "NEED RESET!\n", __func__);
1784 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1785 qla4xxx_wake_dpc(ha);
1786 qla4xxx_mailbox_premature_completion(ha);
1788 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1789 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1790 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1791 __func__);
1792 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1793 qla4xxx_wake_dpc(ha);
1794 } else {
1795 /* Check firmware health */
1796 qla4_8xxx_check_fw_alive(ha);
1802 * qla4xxx_timer - checks every second for work to do.
1803 * @ha: Pointer to host adapter structure.
1805 static void qla4xxx_timer(struct scsi_qla_host *ha)
1807 int start_dpc = 0;
1808 uint16_t w;
1810 /* If we are in the middle of AER/EEH processing
1811 * skip any processing and reschedule the timer
1813 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1814 mod_timer(&ha->timer, jiffies + HZ);
1815 return;
1818 /* Hardware read to trigger an EEH error during mailbox waits. */
1819 if (!pci_channel_offline(ha->pdev))
1820 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1822 if (is_qla8022(ha)) {
1823 qla4_8xxx_watchdog(ha);
1826 if (!is_qla8022(ha)) {
1827 /* Check for heartbeat interval. */
1828 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1829 ha->heartbeat_interval != 0) {
1830 ha->seconds_since_last_heartbeat++;
1831 if (ha->seconds_since_last_heartbeat >
1832 ha->heartbeat_interval + 2)
1833 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1837 /* Wakeup the dpc routine for this adapter, if needed. */
1838 if (start_dpc ||
1839 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1840 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1841 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
1842 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1843 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1844 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
1845 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
1846 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1847 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1848 test_bit(DPC_AEN, &ha->dpc_flags)) {
1849 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1850 " - dpc flags = 0x%lx\n",
1851 ha->host_no, __func__, ha->dpc_flags));
1852 qla4xxx_wake_dpc(ha);
1855 /* Reschedule timer thread to call us back in one second */
1856 mod_timer(&ha->timer, jiffies + HZ);
1858 DEBUG2(ha->seconds_since_last_intr++);
1862 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1863 * @ha: Pointer to host adapter structure.
1865 * This routine stalls the driver until all outstanding commands are returned.
1866 * Caller must release the Hardware Lock prior to calling this routine.
1868 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1870 uint32_t index = 0;
1871 unsigned long flags;
1872 struct scsi_cmnd *cmd;
1874 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1876 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1877 "complete\n", WAIT_CMD_TOV));
1879 while (!time_after_eq(jiffies, wtime)) {
1880 spin_lock_irqsave(&ha->hardware_lock, flags);
1881 /* Find a command that hasn't completed. */
1882 for (index = 0; index < ha->host->can_queue; index++) {
1883 cmd = scsi_host_find_tag(ha->host, index);
1885 * We cannot just check if the index is valid,
1886 * because if we are run from the scsi eh, then
1887 * the scsi/block layer is going to prevent
1888 * the tag from being released.
1890 if (cmd != NULL && CMD_SP(cmd))
1891 break;
1893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1895 /* If No Commands are pending, wait is complete */
1896 if (index == ha->host->can_queue)
1897 return QLA_SUCCESS;
1899 msleep(1000);
1901 /* If we timed out on waiting for commands to come back
1902 * return ERROR. */
1903 return QLA_ERROR;
1906 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
1908 uint32_t ctrl_status;
1909 unsigned long flags = 0;
1911 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
1913 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1914 return QLA_ERROR;
1916 spin_lock_irqsave(&ha->hardware_lock, flags);
1919 * If the SCSI Reset Interrupt bit is set, clear it.
1920 * Otherwise, the Soft Reset won't work.
1922 ctrl_status = readw(&ha->reg->ctrl_status);
1923 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1924 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1926 /* Issue Soft Reset */
1927 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1928 readl(&ha->reg->ctrl_status);
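/* The readl() flushes the posted soft-reset write to the adapter before
 * the hardware lock is dropped. */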
1930 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1931 return QLA_SUCCESS;
1935 * qla4xxx_soft_reset - performs soft reset.
1936 * @ha: Pointer to host adapter structure.
1938 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1940 uint32_t max_wait_time;
1941 unsigned long flags = 0;
1942 int status;
1943 uint32_t ctrl_status;
1945 status = qla4xxx_hw_reset(ha);
1946 if (status != QLA_SUCCESS)
1947 return status;
1949 status = QLA_ERROR;
1950 /* Wait until the Network Reset Intr bit is cleared */
1951 max_wait_time = RESET_INTR_TOV;
1952 do {
1953 spin_lock_irqsave(&ha->hardware_lock, flags);
1954 ctrl_status = readw(&ha->reg->ctrl_status);
1955 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1957 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1958 break;
1960 msleep(1000);
1961 } while ((--max_wait_time));
1963 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1964 DEBUG2(printk(KERN_WARNING
1965 "scsi%ld: Network Reset Intr not cleared by "
1966 "Network function, clearing it now!\n",
1967 ha->host_no));
1968 spin_lock_irqsave(&ha->hardware_lock, flags);
1969 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1970 readl(&ha->reg->ctrl_status);
1971 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1974 /* Wait until the firmware tells us the Soft Reset is done */
1975 max_wait_time = SOFT_RESET_TOV;
1976 do {
1977 spin_lock_irqsave(&ha->hardware_lock, flags);
1978 ctrl_status = readw(&ha->reg->ctrl_status);
1979 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1981 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1982 status = QLA_SUCCESS;
1983 break;
1986 msleep(1000);
1987 } while ((--max_wait_time));
1990 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1991 * after the soft reset has taken place.
1993 spin_lock_irqsave(&ha->hardware_lock, flags);
1994 ctrl_status = readw(&ha->reg->ctrl_status);
1995 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
1996 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1997 readl(&ha->reg->ctrl_status);
1999 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2001 /* If the soft reset fails, the BIOS on the other
2002 * function is most probably also enabled.
2003 * Since initialization is sequential, the other function
2004 * won't be able to acknowledge the soft reset.
2005 * Issue a force soft reset to work around this scenario.
2007 if (max_wait_time == 0) {
2008 /* Issue Force Soft Reset */
2009 spin_lock_irqsave(&ha->hardware_lock, flags);
2010 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2011 readl(&ha->reg->ctrl_status);
2012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2013 /* Wait until the firmware tells us the Soft Reset is done */
2014 max_wait_time = SOFT_RESET_TOV;
2015 do {
2016 spin_lock_irqsave(&ha->hardware_lock, flags);
2017 ctrl_status = readw(&ha->reg->ctrl_status);
2018 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2021 status = QLA_SUCCESS;
2022 break;
2025 msleep(1000);
2026 } while ((--max_wait_time));
2029 return status;
2033 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2034 * @ha: Pointer to host adapter structure.
2035 * @res: SCSI result code assigned to each outstanding command returned
2037 * This routine is called just prior to a HARD RESET to return all
2038 * outstanding commands back to the Operating System.
2039 * Caller should make sure that the following locks are released
2040 * before calling this routine: Hardware lock and io_request_lock.
2042 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2044 struct srb *srb;
2045 int i;
2046 unsigned long flags;
2048 spin_lock_irqsave(&ha->hardware_lock, flags);
2049 for (i = 0; i < ha->host->can_queue; i++) {
2050 srb = qla4xxx_del_from_active_array(ha, i);
2051 if (srb != NULL) {
2052 srb->cmd->result = res;
2053 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
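/* Dropping the last srb reference runs qla4xxx_srb_compl(), which returns
 * the command to the SCSI midlayer with the result set above. */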
2056 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2059 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2061 clear_bit(AF_ONLINE, &ha->flags);
2063 /* Disable the board */
2064 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2066 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2067 qla4xxx_mark_all_devices_missing(ha);
2068 clear_bit(AF_INIT_DONE, &ha->flags);
2071 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2073 struct iscsi_session *sess;
2074 struct ddb_entry *ddb_entry;
2076 sess = cls_session->dd_data;
2077 ddb_entry = sess->dd_data;
2078 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2079 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2083 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2084 * @ha: Pointer to host adapter structure.
2086 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2088 int status = QLA_ERROR;
2089 uint8_t reset_chip = 0;
2091 /* Stall incoming I/O until we are done */
2092 scsi_block_requests(ha->host);
2093 clear_bit(AF_ONLINE, &ha->flags);
2094 clear_bit(AF_LINK_UP, &ha->flags);
2096 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2098 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
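/* DPC_RESET_ACTIVE keeps the timer/watchdog (see qla4_8xxx_watchdog) and
 * the DPC from scheduling or starting another reset while this recovery
 * is in progress. */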
2100 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2102 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2103 reset_chip = 1;
2105 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2106 * do not reset adapter, jump to initialize_adapter */
2107 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2108 status = QLA_SUCCESS;
2109 goto recover_ha_init_adapter;
2112 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2113 * from eh_host_reset or ioctl module */
2114 if (is_qla8022(ha) && !reset_chip &&
2115 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2117 DEBUG2(ql4_printk(KERN_INFO, ha,
2118 "scsi%ld: %s - Performing stop_firmware...\n",
2119 ha->host_no, __func__));
2120 status = ha->isp_ops->reset_firmware(ha);
2121 if (status == QLA_SUCCESS) {
2122 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2123 qla4xxx_cmd_wait(ha);
2124 ha->isp_ops->disable_intrs(ha);
2125 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2126 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2127 } else {
2128 /* If the stop_firmware fails then
2129 * reset the entire chip */
2130 reset_chip = 1;
2131 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2132 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2136 /* Issue full chip reset if recovering from a catastrophic error,
2137 * or if stop_firmware fails for ISP-82xx.
2138 * This is the default case for ISP-4xxx */
2139 if (!is_qla8022(ha) || reset_chip) {
2140 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2141 qla4xxx_cmd_wait(ha);
2142 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2143 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2144 DEBUG2(ql4_printk(KERN_INFO, ha,
2145 "scsi%ld: %s - Performing chip reset..\n",
2146 ha->host_no, __func__));
2147 status = ha->isp_ops->reset_chip(ha);
2150 /* Flush any pending ddb changed AENs */
2151 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2153 recover_ha_init_adapter:
2154 /* Upon successful firmware/chip reset, re-initialize the adapter */
2155 if (status == QLA_SUCCESS) {
2156 /* For ISP-4xxx, force function 1 to always initialize
2157 * before function 3 to prevent both functions from
2158 * stepping on top of each other */
2159 if (!is_qla8022(ha) && (ha->mac_index == 3))
2160 ssleep(6);
2162 /* NOTE: AF_ONLINE flag set upon successful completion of
2163 * qla4xxx_initialize_adapter */
2164 status = qla4xxx_initialize_adapter(ha);
2167 /* Retry failed adapter initialization, if necessary
2168 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2169 * case to prevent ping-pong resets between functions */
2170 if (!test_bit(AF_ONLINE, &ha->flags) &&
2171 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2172 /* Adapter initialization failed, see if we can retry
2173 * resetting the ha.
2174 * Since we don't want to block the DPC for too long
2175 * with multiple resets in the same thread,
2176 * utilize DPC to retry */
2177 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2178 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2179 DEBUG2(printk("scsi%ld: recover adapter - retrying "
2180 "(%d) more times\n", ha->host_no,
2181 ha->retry_reset_ha_cnt));
2182 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2183 status = QLA_ERROR;
2184 } else {
2185 if (ha->retry_reset_ha_cnt > 0) {
2186 /* Schedule another Reset HA--DPC will retry */
2187 ha->retry_reset_ha_cnt--;
2188 DEBUG2(printk("scsi%ld: recover adapter - "
2189 "retry remaining %d\n",
2190 ha->host_no,
2191 ha->retry_reset_ha_cnt));
2192 status = QLA_ERROR;
2195 if (ha->retry_reset_ha_cnt == 0) {
2196 /* Recover adapter retries have been exhausted.
2197 * Adapter DEAD */
2198 DEBUG2(printk("scsi%ld: recover adapter "
2199 "failed - board disabled\n",
2200 ha->host_no));
2201 qla4xxx_dead_adapter_cleanup(ha);
2202 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2203 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2204 clear_bit(DPC_RESET_HA_FW_CONTEXT,
2205 &ha->dpc_flags);
2206 status = QLA_ERROR;
2209 } else {
2210 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2211 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2212 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2215 ha->adapter_error_count++;
2217 if (test_bit(AF_ONLINE, &ha->flags))
2218 ha->isp_ops->enable_intrs(ha);
2220 scsi_unblock_requests(ha->host);
2222 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2223 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
2224 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
2226 return status;
2229 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2231 struct iscsi_session *sess;
2232 struct ddb_entry *ddb_entry;
2233 struct scsi_qla_host *ha;
2235 sess = cls_session->dd_data;
2236 ddb_entry = sess->dd_data;
2237 ha = ddb_entry->ha;
2238 if (!iscsi_is_session_online(cls_session)) {
2239 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2240 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2241 " unblock session\n", ha->host_no, __func__,
2242 ddb_entry->fw_ddb_index);
2243 iscsi_unblock_session(ddb_entry->sess);
2244 } else {
2245 /* Trigger relogin */
2246 iscsi_session_failure(cls_session->dd_data,
2247 ISCSI_ERR_CONN_FAILED);
2252 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2254 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2257 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2259 if (ha->dpc_thread)
2260 queue_work(ha->dpc_thread, &ha->dpc_work);
2264 * qla4xxx_do_dpc - dpc routine
2265 * @data: in our case pointer to adapter structure
2267 * This routine is a task that is scheduled by the interrupt handler
2268 * to perform the background processing for interrupts. We put it
2269 * on a task queue that is consumed whenever the scheduler runs; that's
2270 * so you can do anything (e.g. put the process to sleep etc). In fact,
2271 * the mid-level tries to sleep when it reaches the driver threshold
2272 * "host->can_queue". This can cause a panic if we were in our interrupt code.
2274 static void qla4xxx_do_dpc(struct work_struct *work)
2276 struct scsi_qla_host *ha =
2277 container_of(work, struct scsi_qla_host, dpc_work);
2278 int status = QLA_ERROR;
2280 DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
2281 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2282 ha->host_no, __func__, ha->flags, ha->dpc_flags));
2284 /* Initialization not yet finished. Don't do anything yet. */
2285 if (!test_bit(AF_INIT_DONE, &ha->flags))
2286 return;
2288 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2289 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2290 ha->host_no, __func__, ha->flags));
2291 return;
2294 if (is_qla8022(ha)) {
2295 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2296 qla4_8xxx_idc_lock(ha);
2297 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2298 QLA82XX_DEV_FAILED);
2299 qla4_8xxx_idc_unlock(ha);
2300 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2301 qla4_8xxx_device_state_handler(ha);
2303 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2304 qla4_8xxx_need_qsnt_handler(ha);
2308 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2309 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2310 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2311 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2312 if (ql4xdontresethba) {
2313 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2314 ha->host_no, __func__));
2315 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2316 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2317 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2318 goto dpc_post_reset_ha;
2320 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2321 test_bit(DPC_RESET_HA, &ha->dpc_flags))
2322 qla4xxx_recover_adapter(ha);
2324 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2325 uint8_t wait_time = RESET_INTR_TOV;
2327 while ((readw(&ha->reg->ctrl_status) &
2328 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2329 if (--wait_time == 0)
2330 break;
2331 msleep(1000);
2333 if (wait_time == 0)
2334 DEBUG2(printk("scsi%ld: %s: SR|FSR "
2335 "bit not cleared-- resetting\n",
2336 ha->host_no, __func__));
2337 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2338 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2339 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2340 status = qla4xxx_recover_adapter(ha);
2342 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2343 if (status == QLA_SUCCESS)
2344 ha->isp_ops->enable_intrs(ha);
2348 dpc_post_reset_ha:
2349 /* ---- process AEN? --- */
2350 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2351 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2353 /* ---- Get DHCP IP Address? --- */
2354 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2355 qla4xxx_get_dhcp_ip_address(ha);
2357 /* ---- link change? --- */
2358 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2359 if (!test_bit(AF_LINK_UP, &ha->flags)) {
2360 /* ---- link down? --- */
2361 qla4xxx_mark_all_devices_missing(ha);
2362 } else {
2363 /* ---- link up? --- *
2364 * F/W will auto login to all devices ONLY ONCE after
2365 * link up during driver initialization and runtime
2366 * fatal error recovery. Therefore, the driver must
2367 * manually relogin to devices when recovering from
2368 * connection failures, logouts, expired KATO, etc. */
2370 qla4xxx_relogin_all_devices(ha);
2376 * qla4xxx_free_adapter - release the adapter
2377 * @ha: pointer to adapter structure
2379 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2382 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2383 /* Turn-off interrupts on the card. */
2384 ha->isp_ops->disable_intrs(ha);
2387 /* Remove timer thread, if present */
2388 if (ha->timer_active)
2389 qla4xxx_stop_timer(ha);
2391 /* Kill the kernel thread for this host */
2392 if (ha->dpc_thread)
2393 destroy_workqueue(ha->dpc_thread);
2395 /* Kill the task work queue for this host */
2396 if (ha->task_wq)
2397 destroy_workqueue(ha->task_wq);
2399 /* Put firmware in known state */
2400 ha->isp_ops->reset_firmware(ha);
2402 if (is_qla8022(ha)) {
2403 qla4_8xxx_idc_lock(ha);
2404 qla4_8xxx_clear_drv_active(ha);
2405 qla4_8xxx_idc_unlock(ha);
2408 /* Detach interrupts */
2409 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
2410 qla4xxx_free_irqs(ha);
2412 /* free extra memory */
2413 qla4xxx_mem_free(ha);
2416 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2418 int status = 0;
2419 uint8_t revision_id;
2420 unsigned long mem_base, mem_len, db_base, db_len;
2421 struct pci_dev *pdev = ha->pdev;
2423 status = pci_request_regions(pdev, DRIVER_NAME);
2424 if (status) {
2425 printk(KERN_WARNING
2426 "scsi(%ld) Failed to reserve PIO regions (%s) "
2427 "status=%d\n", ha->host_no, pci_name(pdev), status);
2428 goto iospace_error_exit;
2431 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2432 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2433 __func__, revision_id));
2434 ha->revision_id = revision_id;
2436 /* remap phys address */
2437 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2438 mem_len = pci_resource_len(pdev, 0);
2439 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2440 __func__, mem_base, mem_len));
2442 /* mapping of pcibase pointer */
2443 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2444 if (!ha->nx_pcibase) {
2445 printk(KERN_ERR
2446 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2447 pci_release_regions(ha->pdev);
2448 goto iospace_error_exit;
2451 /* Mapping of IO base pointer, door bell read and write pointer */
2453 /* mapping of IO base pointer */
2454 ha->qla4_8xxx_reg =
2455 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
2456 0xbc000 + (ha->pdev->devfn << 11));
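/* Each PCI function gets its own 2 KB register window (devfn << 11),
 * starting at offset 0xbc000 within the BAR 0 mapping. */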
2458 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
2459 db_len = pci_resource_len(pdev, 4);
2461 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2462 QLA82XX_CAM_RAM_DB2);
2464 return 0;
2465 iospace_error_exit:
2466 return -ENOMEM;
2469 /***
2470 * qla4xxx_iospace_config - maps registers
2471 * @ha: pointer to adapter structure
2473 * This routine maps the HBA's registers from the PCI address space
2474 * into the kernel virtual address space for memory mapped i/o.
2476 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
2478 unsigned long pio, pio_len, pio_flags;
2479 unsigned long mmio, mmio_len, mmio_flags;
2481 pio = pci_resource_start(ha->pdev, 0);
2482 pio_len = pci_resource_len(ha->pdev, 0);
2483 pio_flags = pci_resource_flags(ha->pdev, 0);
2484 if (pio_flags & IORESOURCE_IO) {
2485 if (pio_len < MIN_IOBASE_LEN) {
2486 ql4_printk(KERN_WARNING, ha,
2487 "Invalid PCI I/O region size\n");
2488 pio = 0;
2490 } else {
2491 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
2492 pio = 0;
2495 /* Use MMIO operations for all accesses. */
2496 mmio = pci_resource_start(ha->pdev, 1);
2497 mmio_len = pci_resource_len(ha->pdev, 1);
2498 mmio_flags = pci_resource_flags(ha->pdev, 1);
2500 if (!(mmio_flags & IORESOURCE_MEM)) {
2501 ql4_printk(KERN_ERR, ha,
2502 "region #0 not an MMIO resource, aborting\n");
2504 goto iospace_error_exit;
2507 if (mmio_len < MIN_IOBASE_LEN) {
2508 ql4_printk(KERN_ERR, ha,
2509 "Invalid PCI mem region size, aborting\n");
2510 goto iospace_error_exit;
2513 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
2514 ql4_printk(KERN_WARNING, ha,
2515 "Failed to reserve PIO/MMIO regions\n");
2517 goto iospace_error_exit;
2520 ha->pio_address = pio;
2521 ha->pio_length = pio_len;
2522 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2523 if (!ha->reg) {
2524 ql4_printk(KERN_ERR, ha,
2525 "cannot remap MMIO, aborting\n");
2527 goto iospace_error_exit;
2530 return 0;
2532 iospace_error_exit:
2533 return -ENOMEM;
2536 static struct isp_operations qla4xxx_isp_ops = {
2537 .iospace_config = qla4xxx_iospace_config,
2538 .pci_config = qla4xxx_pci_config,
2539 .disable_intrs = qla4xxx_disable_intrs,
2540 .enable_intrs = qla4xxx_enable_intrs,
2541 .start_firmware = qla4xxx_start_firmware,
2542 .intr_handler = qla4xxx_intr_handler,
2543 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
2544 .reset_chip = qla4xxx_soft_reset,
2545 .reset_firmware = qla4xxx_hw_reset,
2546 .queue_iocb = qla4xxx_queue_iocb,
2547 .complete_iocb = qla4xxx_complete_iocb,
2548 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
2549 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
2550 .get_sys_info = qla4xxx_get_sys_info,
2553 static struct isp_operations qla4_8xxx_isp_ops = {
2554 .iospace_config = qla4_8xxx_iospace_config,
2555 .pci_config = qla4_8xxx_pci_config,
2556 .disable_intrs = qla4_8xxx_disable_intrs,
2557 .enable_intrs = qla4_8xxx_enable_intrs,
2558 .start_firmware = qla4_8xxx_load_risc,
2559 .intr_handler = qla4_8xxx_intr_handler,
2560 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2561 .reset_chip = qla4_8xxx_isp_reset,
2562 .reset_firmware = qla4_8xxx_stop_firmware,
2563 .queue_iocb = qla4_8xxx_queue_iocb,
2564 .complete_iocb = qla4_8xxx_complete_iocb,
2565 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
2566 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
2567 .get_sys_info = qla4_8xxx_get_sys_info,
2570 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2572 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2575 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2577 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2580 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2582 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2585 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2587 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2590 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2592 struct scsi_qla_host *ha = data;
2593 char *str = buf;
2594 int rc;
2596 switch (type) {
2597 case ISCSI_BOOT_ETH_FLAGS:
2598 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2599 break;
2600 case ISCSI_BOOT_ETH_INDEX:
2601 rc = sprintf(str, "0\n");
2602 break;
2603 case ISCSI_BOOT_ETH_MAC:
2604 rc = sysfs_format_mac(str, ha->my_mac,
2605 MAC_ADDR_LEN);
2606 break;
2607 default:
2608 rc = -ENOSYS;
2609 break;
2611 return rc;
2614 static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2616 int rc;
2618 switch (type) {
2619 case ISCSI_BOOT_ETH_FLAGS:
2620 case ISCSI_BOOT_ETH_MAC:
2621 case ISCSI_BOOT_ETH_INDEX:
2622 rc = S_IRUGO;
2623 break;
2624 default:
2625 rc = 0;
2626 break;
2628 return rc;
2631 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2633 struct scsi_qla_host *ha = data;
2634 char *str = buf;
2635 int rc;
2637 switch (type) {
2638 case ISCSI_BOOT_INI_INITIATOR_NAME:
2639 rc = sprintf(str, "%s\n", ha->name_string);
2640 break;
2641 default:
2642 rc = -ENOSYS;
2643 break;
2645 return rc;
2648 static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2650 int rc;
2652 switch (type) {
2653 case ISCSI_BOOT_INI_INITIATOR_NAME:
2654 rc = S_IRUGO;
2655 break;
2656 default:
2657 rc = 0;
2658 break;
2660 return rc;
2663 static ssize_t
2664 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2665 char *buf)
2667 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2668 char *str = buf;
2669 int rc;
2671 switch (type) {
2672 case ISCSI_BOOT_TGT_NAME:
2673 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2674 break;
2675 case ISCSI_BOOT_TGT_IP_ADDR:
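/* An ip_type of 0x1 denotes an IPv4 address; anything else is formatted
 * as IPv6. */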
2676 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2677 rc = sprintf(buf, "%pI4\n",
2678 &boot_conn->dest_ipaddr.ip_address);
2679 else
2680 rc = sprintf(str, "%pI6\n",
2681 &boot_conn->dest_ipaddr.ip_address);
2682 break;
2683 case ISCSI_BOOT_TGT_PORT:
2684 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2685 break;
2686 case ISCSI_BOOT_TGT_CHAP_NAME:
2687 rc = sprintf(str, "%.*s\n",
2688 boot_conn->chap.target_chap_name_length,
2689 (char *)&boot_conn->chap.target_chap_name);
2690 break;
2691 case ISCSI_BOOT_TGT_CHAP_SECRET:
2692 rc = sprintf(str, "%.*s\n",
2693 boot_conn->chap.target_secret_length,
2694 (char *)&boot_conn->chap.target_secret);
2695 break;
2696 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2697 rc = sprintf(str, "%.*s\n",
2698 boot_conn->chap.intr_chap_name_length,
2699 (char *)&boot_conn->chap.intr_chap_name);
2700 break;
2701 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2702 rc = sprintf(str, "%.*s\n",
2703 boot_conn->chap.intr_secret_length,
2704 (char *)&boot_conn->chap.intr_secret);
2705 break;
2706 case ISCSI_BOOT_TGT_FLAGS:
2707 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2708 break;
2709 case ISCSI_BOOT_TGT_NIC_ASSOC:
2710 rc = sprintf(str, "0\n");
2711 break;
2712 default:
2713 rc = -ENOSYS;
2714 break;
2716 return rc;
2719 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2721 struct scsi_qla_host *ha = data;
2722 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2724 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2727 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2729 struct scsi_qla_host *ha = data;
2730 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2732 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2735 static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2737 int rc;
2739 switch (type) {
2740 case ISCSI_BOOT_TGT_NAME:
2741 case ISCSI_BOOT_TGT_IP_ADDR:
2742 case ISCSI_BOOT_TGT_PORT:
2743 case ISCSI_BOOT_TGT_CHAP_NAME:
2744 case ISCSI_BOOT_TGT_CHAP_SECRET:
2745 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2746 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2747 case ISCSI_BOOT_TGT_NIC_ASSOC:
2748 case ISCSI_BOOT_TGT_FLAGS:
2749 rc = S_IRUGO;
2750 break;
2751 default:
2752 rc = 0;
2753 break;
2755 return rc;
2758 static void qla4xxx_boot_release(void *data)
2760 struct scsi_qla_host *ha = data;
2762 scsi_host_put(ha->host);
2765 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2767 dma_addr_t buf_dma;
2768 uint32_t addr, pri_addr, sec_addr;
2769 uint32_t offset;
2770 uint16_t func_num;
2771 uint8_t val;
2772 uint8_t *buf = NULL;
2773 size_t size = 13 * sizeof(uint8_t);
2774 int ret = QLA_SUCCESS;
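/* The 13-byte buffer appears to hold the ISP82xx boot-parameter block read
 * from flash: byte 1 = boot options, byte 2 = primary target index,
 * byte 11 = secondary target index (see the is_qla8022() branch below). */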
2776 func_num = PCI_FUNC(ha->pdev->devfn);
2778 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
2779 __func__, ha->pdev->device, func_num);
2781 if (is_qla40XX(ha)) {
2782 if (func_num == 1) {
2783 addr = NVRAM_PORT0_BOOT_MODE;
2784 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2785 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2786 } else if (func_num == 3) {
2787 addr = NVRAM_PORT1_BOOT_MODE;
2788 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2789 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2790 } else {
2791 ret = QLA_ERROR;
2792 goto exit_boot_info;
2795 /* Check Boot Mode */
2796 val = rd_nvram_byte(ha, addr);
2797 if (!(val & 0x07)) {
2798 DEBUG2(ql4_printk(KERN_ERR, ha,
2799 "%s: Failed Boot options : 0x%x\n",
2800 __func__, val));
2801 ret = QLA_ERROR;
2802 goto exit_boot_info;
2805 /* get primary valid target index */
2806 val = rd_nvram_byte(ha, pri_addr);
2807 if (val & BIT_7)
2808 ddb_index[0] = (val & 0x7f);
2810 /* get secondary valid target index */
2811 val = rd_nvram_byte(ha, sec_addr);
2812 if (val & BIT_7)
2813 ddb_index[1] = (val & 0x7f);
2815 } else if (is_qla8022(ha)) {
2816 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2817 &buf_dma, GFP_KERNEL);
2818 if (!buf) {
2819 DEBUG2(ql4_printk(KERN_ERR, ha,
2820 "%s: Unable to allocate dma buffer\n",
2821 __func__));
2822 ret = QLA_ERROR;
2823 goto exit_boot_info;
2826 if (ha->port_num == 0)
2827 offset = BOOT_PARAM_OFFSET_PORT0;
2828 else if (ha->port_num == 1)
2829 offset = BOOT_PARAM_OFFSET_PORT1;
2830 else {
2831 ret = QLA_ERROR;
2832 goto exit_boot_info_free;
2834 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2835 offset;
2836 if (qla4xxx_get_flash(ha, buf_dma, addr,
2837 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
2838 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
2839 "failed\n", ha->host_no, __func__));
2840 ret = QLA_ERROR;
2841 goto exit_boot_info_free;
2843 /* Check Boot Mode */
2844 if (!(buf[1] & 0x07)) {
2845 DEBUG2(ql4_printk(KERN_INFO, ha,
2846 "Failed: Boot options : 0x%x\n",
2847 buf[1]));
2848 ret = QLA_ERROR;
2849 goto exit_boot_info_free;
2852 /* get primary valid target index */
2853 if (buf[2] & BIT_7)
2854 ddb_index[0] = buf[2] & 0x7f;
2856 /* get secondary valid target index */
2857 if (buf[11] & BIT_7)
2858 ddb_index[1] = buf[11] & 0x7f;
2859 } else {
2860 ret = QLA_ERROR;
2861 goto exit_boot_info;
2864 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2865 " target ID %d\n", __func__, ddb_index[0],
2866 ddb_index[1]));
2868 exit_boot_info_free:
2869 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2870 exit_boot_info:
2871 return ret;
2875 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
2876 * @ha: pointer to adapter structure
2877 * @username: CHAP username to be returned
2878 * @password: CHAP password to be returned
2880 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
2881 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
2882 * So from the CHAP cache find the first BIDI CHAP entry and set it
2883 * to the boot record in sysfs.
2885 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
2886 char *password)
2888 int i, ret = -EINVAL;
2889 int max_chap_entries = 0;
2890 struct ql4_chap_table *chap_table;
2892 if (is_qla8022(ha))
2893 max_chap_entries = (ha->hw.flt_chap_size / 2) /
2894 sizeof(struct ql4_chap_table);
2895 else
2896 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
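/* On ISP82xx the flash CHAP region is shared by both ports, so only half
 * of flt_chap_size worth of entries belongs to this function (see
 * qla4xxx_create_chap_list() below). */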
2898 if (!ha->chap_list) {
2899 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
2900 return ret;
2903 mutex_lock(&ha->chap_sem);
2904 for (i = 0; i < max_chap_entries; i++) {
2905 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
2906 if (chap_table->cookie !=
2907 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
2908 continue;
2911 if (chap_table->flags & BIT_7) /* local */
2912 continue;
2914 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
2915 continue;
2917 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
2918 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
2919 ret = 0;
2920 break;
2922 mutex_unlock(&ha->chap_sem);
2924 return ret;
2928 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2929 struct ql4_boot_session_info *boot_sess,
2930 uint16_t ddb_index)
2932 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2933 struct dev_db_entry *fw_ddb_entry;
2934 dma_addr_t fw_ddb_entry_dma;
2935 uint16_t idx;
2936 uint16_t options;
2937 int ret = QLA_SUCCESS;
2939 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2940 &fw_ddb_entry_dma, GFP_KERNEL);
2941 if (!fw_ddb_entry) {
2942 DEBUG2(ql4_printk(KERN_ERR, ha,
2943 "%s: Unable to allocate dma buffer.\n",
2944 __func__));
2945 ret = QLA_ERROR;
2946 return ret;
2949 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2950 fw_ddb_entry_dma, ddb_index)) {
2951 DEBUG2(ql4_printk(KERN_ERR, ha,
2952 "%s: Flash DDB read Failed\n", __func__));
2953 ret = QLA_ERROR;
2954 goto exit_boot_target;
2957 /* Update target name and IP from DDB */
2958 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2959 min(sizeof(boot_sess->target_name),
2960 sizeof(fw_ddb_entry->iscsi_name)));
2962 options = le16_to_cpu(fw_ddb_entry->options);
2963 if (options & DDB_OPT_IPV6_DEVICE) {
2964 memcpy(&boot_conn->dest_ipaddr.ip_address,
2965 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2966 } else {
2967 boot_conn->dest_ipaddr.ip_type = 0x1;
2968 memcpy(&boot_conn->dest_ipaddr.ip_address,
2969 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2972 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2974 /* update chap information */
2975 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2977 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2979 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2981 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2982 target_chap_name,
2983 (char *)&boot_conn->chap.target_secret,
2984 idx);
2985 if (ret) {
2986 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2987 ret = QLA_ERROR;
2988 goto exit_boot_target;
2991 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2992 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2995 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2997 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
2999 ret = qla4xxx_get_bidi_chap(ha,
3000 (char *)&boot_conn->chap.intr_chap_name,
3001 (char *)&boot_conn->chap.intr_secret);
3003 if (ret) {
3004 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
3005 ret = QLA_ERROR;
3006 goto exit_boot_target;
3009 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3010 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3013 exit_boot_target:
3014 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3015 fw_ddb_entry, fw_ddb_entry_dma);
3016 return ret;
3019 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3021 uint16_t ddb_index[2];
3022 int ret = QLA_ERROR;
3023 int rval;
3025 memset(ddb_index, 0, sizeof(ddb_index));
3026 ddb_index[0] = 0xffff;
3027 ddb_index[1] = 0xffff;
3028 ret = get_fw_boot_info(ha, ddb_index);
3029 if (ret != QLA_SUCCESS) {
3030 DEBUG2(ql4_printk(KERN_ERR, ha,
3031 "%s: Failed to set boot info.\n", __func__));
3032 return ret;
3035 if (ddb_index[0] == 0xffff)
3036 goto sec_target;
3038 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
3039 ddb_index[0]);
3040 if (rval != QLA_SUCCESS) {
3041 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3042 "primary target\n", __func__));
3043 } else
3044 ret = QLA_SUCCESS;
3046 sec_target:
3047 if (ddb_index[1] == 0xffff)
3048 goto exit_get_boot_info;
3050 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
3051 ddb_index[1]);
3052 if (rval != QLA_SUCCESS) {
3053 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3054 "secondary target\n", __func__));
3055 } else
3056 ret = QLA_SUCCESS;
3058 exit_get_boot_info:
3059 return ret;
3062 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3064 struct iscsi_boot_kobj *boot_kobj;
3066 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3067 return 0;
3069 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3070 if (!ha->boot_kset)
3071 goto kset_free;
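/* Each boot kobject created below pins the Scsi_Host with its own
 * reference; qla4xxx_boot_release() (scsi_host_put) drops it when the
 * kobject is destroyed. */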
3073 if (!scsi_host_get(ha->host))
3074 goto kset_free;
3075 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3076 qla4xxx_show_boot_tgt_pri_info,
3077 qla4xxx_tgt_get_attr_visibility,
3078 qla4xxx_boot_release);
3079 if (!boot_kobj)
3080 goto put_host;
3082 if (!scsi_host_get(ha->host))
3083 goto kset_free;
3084 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3085 qla4xxx_show_boot_tgt_sec_info,
3086 qla4xxx_tgt_get_attr_visibility,
3087 qla4xxx_boot_release);
3088 if (!boot_kobj)
3089 goto put_host;
3091 if (!scsi_host_get(ha->host))
3092 goto kset_free;
3093 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3094 qla4xxx_show_boot_ini_info,
3095 qla4xxx_ini_get_attr_visibility,
3096 qla4xxx_boot_release);
3097 if (!boot_kobj)
3098 goto put_host;
3100 if (!scsi_host_get(ha->host))
3101 goto kset_free;
3102 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3103 qla4xxx_show_boot_eth_info,
3104 qla4xxx_eth_get_attr_visibility,
3105 qla4xxx_boot_release);
3106 if (!boot_kobj)
3107 goto put_host;
3109 return 0;
3111 put_host:
3112 scsi_host_put(ha->host);
3113 kset_free:
3114 iscsi_boot_destroy_kset(ha->boot_kset);
3115 return -ENOMEM;
3120 * qla4xxx_create_chap_list - Create CHAP list from FLASH
3121 * @ha: pointer to adapter structure
3123 * Read flash and build a list of CHAP entries. During login, when a CHAP entry
3124 * is received it is checked against this list. If the entry exists, the CHAP
3125 * entry index is set in the DDB. If the CHAP entry does not exist in this list,
3126 * a new entry is added to the CHAP table in FLASH and the index obtained is
3127 * used in the DDB.
3129 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3131 int rval = 0;
3132 uint8_t *chap_flash_data = NULL;
3133 uint32_t offset;
3134 dma_addr_t chap_dma;
3135 uint32_t chap_size = 0;
3137 if (is_qla40XX(ha))
3138 chap_size = MAX_CHAP_ENTRIES_40XX *
3139 sizeof(struct ql4_chap_table);
3140 else /* Single region contains CHAP info for both
3141 * ports which is divided into half for each port.
3143 chap_size = ha->hw.flt_chap_size / 2;
3145 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3146 &chap_dma, GFP_KERNEL);
3147 if (!chap_flash_data) {
3148 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3149 return;
3151 if (is_qla40XX(ha))
3152 offset = FLASH_CHAP_OFFSET;
3153 else {
3154 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3155 if (ha->port_num == 1)
3156 offset += chap_size;
3159 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3160 if (rval != QLA_SUCCESS)
3161 goto exit_chap_list;
3163 if (ha->chap_list == NULL)
3164 ha->chap_list = vmalloc(chap_size);
3165 if (ha->chap_list == NULL) {
3166 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3167 goto exit_chap_list;
3170 memcpy(ha->chap_list, chap_flash_data, chap_size);
3172 exit_chap_list:
3173 dma_free_coherent(&ha->pdev->dev, chap_size,
3174 chap_flash_data, chap_dma);
3175 return;
3179 * qla4xxx_probe_adapter - callback function to probe HBA
3180 * @pdev: pointer to pci_dev structure
3181 * @pci_device_id: pointer to pci_device entry
3183 * This routine will probe for Qlogic 4xxx iSCSI host adapters.
3184 * It returns zero if successful. It also initializes all data necessary for
3185 * the driver.
3187 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3188 const struct pci_device_id *ent)
3190 int ret = -ENODEV, status;
3191 struct Scsi_Host *host;
3192 struct scsi_qla_host *ha;
3193 uint8_t init_retry_count = 0;
3194 char buf[34];
3195 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
3196 uint32_t dev_state;
3198 if (pci_enable_device(pdev))
3199 return -1;
3201 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
3202 if (host == NULL) {
3203 printk(KERN_WARNING
3204 "qla4xxx: Couldn't allocate host from scsi layer!\n");
3205 goto probe_disable_device;
3208 /* Clear our data area */
3209 ha = to_qla_host(host);
3210 memset(ha, 0, sizeof(*ha));
3212 /* Save the information from PCI BIOS. */
3213 ha->pdev = pdev;
3214 ha->host = host;
3215 ha->host_no = host->host_no;
3217 pci_enable_pcie_error_reporting(pdev);
3219 /* Setup Runtime configurable options */
3220 if (is_qla8022(ha)) {
3221 ha->isp_ops = &qla4_8xxx_isp_ops;
3222 rwlock_init(&ha->hw_lock);
3223 ha->qdr_sn_window = -1;
3224 ha->ddr_mn_window = -1;
3225 ha->curr_window = 255;
3226 ha->func_num = PCI_FUNC(ha->pdev->devfn);
3227 nx_legacy_intr = &legacy_intr[ha->func_num];
3228 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3229 ha->nx_legacy_intr.tgt_status_reg =
3230 nx_legacy_intr->tgt_status_reg;
3231 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3232 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3233 } else {
3234 ha->isp_ops = &qla4xxx_isp_ops;
3237 /* Set EEH reset type to fundamental if required by hba */
3238 if (is_qla8022(ha))
3239 pdev->needs_freset = 1;
3241 /* Configure PCI I/O space. */
3242 ret = ha->isp_ops->iospace_config(ha);
3243 if (ret)
3244 goto probe_failed_ioconfig;
3246 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
3247 pdev->device, pdev->irq, ha->reg);
3249 qla4xxx_config_dma_addressing(ha);
3251 /* Initialize lists and spinlocks. */
3252 INIT_LIST_HEAD(&ha->free_srb_q);
3254 mutex_init(&ha->mbox_sem);
3255 mutex_init(&ha->chap_sem);
3256 init_completion(&ha->mbx_intr_comp);
3257 init_completion(&ha->disable_acb_comp);
3259 spin_lock_init(&ha->hardware_lock);
3261 /* Allocate dma buffers */
3262 if (qla4xxx_mem_alloc(ha)) {
3263 ql4_printk(KERN_WARNING, ha,
3264 "[ERROR] Failed to allocate memory for adapter\n");
3266 ret = -ENOMEM;
3267 goto probe_failed;
3270 host->cmd_per_lun = 3;
3271 host->max_channel = 0;
3272 host->max_lun = MAX_LUNS - 1;
3273 host->max_id = MAX_TARGETS;
3274 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3275 host->can_queue = MAX_SRBS ;
3276 host->transportt = qla4xxx_scsi_transport;
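/* The shared tag map set up below is what lets the driver look up
 * outstanding commands by tag via scsi_host_find_tag() (see
 * qla4xxx_cmd_wait() and qla4xxx_del_from_active_array()). */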
3278 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3279 if (ret) {
3280 ql4_printk(KERN_WARNING, ha,
3281 "%s: scsi_init_shared_tag_map failed\n", __func__);
3282 goto probe_failed;
3285 pci_set_drvdata(pdev, ha);
3287 ret = scsi_add_host(host, &pdev->dev);
3288 if (ret)
3289 goto probe_failed;
3291 if (is_qla8022(ha))
3292 (void) qla4_8xxx_get_flash_info(ha);
3295 * Initialize the Host adapter request/response queues and
3296 * firmware
3297 * NOTE: interrupts enabled upon successful completion
3299 status = qla4xxx_initialize_adapter(ha);
3300 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3301 init_retry_count++ < MAX_INIT_RETRIES) {
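/* Retry initialization up to MAX_INIT_RETRIES times, resetting the chip
 * between attempts; give up early if the ISP82xx IDC state reports
 * FAILED. */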
3303 if (is_qla8022(ha)) {
3304 qla4_8xxx_idc_lock(ha);
3305 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3306 qla4_8xxx_idc_unlock(ha);
3307 if (dev_state == QLA82XX_DEV_FAILED) {
3308 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3309 "initialize adapter. H/W is in failed state\n",
3310 __func__);
3311 break;
3314 DEBUG2(printk("scsi: %s: retrying adapter initialization "
3315 "(%d)\n", __func__, init_retry_count));
3317 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3318 continue;
3320 status = qla4xxx_initialize_adapter(ha);
3323 if (!test_bit(AF_ONLINE, &ha->flags)) {
3324 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
3326 if (is_qla8022(ha) && ql4xdontresethba) {
3327 /* Put the device in failed state. */
3328 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3329 qla4_8xxx_idc_lock(ha);
3330 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3331 QLA82XX_DEV_FAILED);
3332 qla4_8xxx_idc_unlock(ha);
3334 ret = -ENODEV;
3335 goto remove_host;
3338 /* Startup the kernel thread for this host adapter. */
3339 DEBUG2(printk("scsi: %s: Starting kernel thread for "
3340 "qla4xxx_dpc\n", __func__));
3341 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3342 ha->dpc_thread = create_singlethread_workqueue(buf);
3343 if (!ha->dpc_thread) {
3344 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
3345 ret = -ENODEV;
3346 goto remove_host;
3348 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
3350 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3351 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3352 if (!ha->task_wq) {
3353 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3354 ret = -ENODEV;
3355 goto remove_host;
3358 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3359 * (which is called indirectly by qla4xxx_initialize_adapter),
3360 * so that irqs will be registered after crbinit but before
3361 * mbx_intr_enable.
3363 if (!is_qla8022(ha)) {
3364 ret = qla4xxx_request_irqs(ha);
3365 if (ret) {
3366 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3367 "interrupt %d already in use.\n", pdev->irq);
3368 goto remove_host;
3372 pci_save_state(ha->pdev);
3373 ha->isp_ops->enable_intrs(ha);
3375 /* Start timer thread. */
3376 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3378 set_bit(AF_INIT_DONE, &ha->flags);
3380 printk(KERN_INFO
3381 " QLogic iSCSI HBA Driver version: %s\n"
3382 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3383 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3384 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3385 ha->patch_number, ha->build_number);
3387 qla4xxx_create_chap_list(ha);
3389 if (qla4xxx_setup_boot_info(ha))
3390 ql4_printk(KERN_ERR, ha, "%s: iSCSI boot info setup failed\n",
3391 __func__);
3393 qla4xxx_create_ifaces(ha);
3394 return 0;
3396 remove_host:
3397 scsi_remove_host(ha->host);
3399 probe_failed:
3400 qla4xxx_free_adapter(ha);
3402 probe_failed_ioconfig:
3403 pci_disable_pcie_error_reporting(pdev);
3404 scsi_host_put(ha->host);
3406 probe_disable_device:
3407 pci_disable_device(pdev);
3409 return ret;
3413 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
3414 * @ha: pointer to adapter structure
3416 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3417 * so that the other port will not re-initialize while in the process of
3418 * removing the ha due to driver unload or hba hotplug.
3420 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3422 struct scsi_qla_host *other_ha = NULL;
3423 struct pci_dev *other_pdev = NULL;
3424 int fn = ISP4XXX_PCI_FN_2;
3426 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
3427 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3428 fn = ISP4XXX_PCI_FN_1;
3430 other_pdev =
3431 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3432 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3433 fn));
3435 /* Get other_ha if other_pdev is valid and its state is enabled */
3436 if (other_pdev) {
3437 if (atomic_read(&other_pdev->enable_cnt)) {
3438 other_ha = pci_get_drvdata(other_pdev);
3439 if (other_ha) {
3440 set_bit(AF_HA_REMOVAL, &other_ha->flags);
3441 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3442 "Prevent %s reinit\n", __func__,
3443 dev_name(&other_ha->pdev->dev)));
3446 pci_dev_put(other_pdev);
3451 * qla4xxx_remove_adapter - callback function to remove adapter.
3452 * @pci_dev: PCI device pointer
3454 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3456 struct scsi_qla_host *ha;
3458 ha = pci_get_drvdata(pdev);
3460 if (!is_qla8022(ha))
3461 qla4xxx_prevent_other_port_reinit(ha);
3463 /* destroy iface from sysfs */
3464 qla4xxx_destroy_ifaces(ha);
3466 if (ha->boot_kset)
3467 iscsi_boot_destroy_kset(ha->boot_kset);
3469 scsi_remove_host(ha->host);
3471 qla4xxx_free_adapter(ha);
3473 scsi_host_put(ha->host);
3475 pci_disable_pcie_error_reporting(pdev);
3476 pci_disable_device(pdev);
3477 pci_set_drvdata(pdev, NULL);
3481 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3482 * @ha: HA context
3484 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
3485 * supported addressing method.
3487 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
3489 int retval;
3491 /* Update our PCI device dma_mask for full 64 bit mask */
3492 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3493 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
3494 dev_dbg(&ha->pdev->dev,
3495 "Failed to set 64 bit PCI consistent mask; "
3496 "using 32 bit.\n");
3497 retval = pci_set_consistent_dma_mask(ha->pdev,
3498 DMA_BIT_MASK(32));
3500 } else
3501 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
3504 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3506 struct iscsi_cls_session *cls_sess;
3507 struct iscsi_session *sess;
3508 struct ddb_entry *ddb;
3509 int queue_depth = QL4_DEF_QDEPTH;
3511 cls_sess = starget_to_session(sdev->sdev_target);
3512 sess = cls_sess->dd_data;
3513 ddb = sess->dd_data;
3515 sdev->hostdata = ddb;
3516 sdev->tagged_supported = 1;
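/* Honor the ql4xmaxqdepth module parameter only when it is non-zero and
 * at most 0xffff; otherwise fall back to QL4_DEF_QDEPTH (32). */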
3518 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3519 queue_depth = ql4xmaxqdepth;
3521 scsi_activate_tcq(sdev, queue_depth);
3522 return 0;
3525 static int qla4xxx_slave_configure(struct scsi_device *sdev)
3527 sdev->tagged_supported = 1;
3528 return 0;
3531 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3533 scsi_deactivate_tcq(sdev, 1);
3537 * qla4xxx_del_from_active_array - returns an active srb
3538 * @ha: Pointer to host adapter structure.
3539 * @index: index into the active_array
3541 * This routine removes and returns the srb at the specified index
3543 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3544 uint32_t index)
3546 struct srb *srb = NULL;
3547 struct scsi_cmnd *cmd = NULL;
3549 cmd = scsi_host_find_tag(ha->host, index);
3550 if (!cmd)
3551 return srb;
3553 srb = (struct srb *)CMD_SP(cmd);
3554 if (!srb)
3555 return srb;
3557 /* update counters */
3558 if (srb->flags & SRB_DMA_VALID) {
3559 ha->req_q_count += srb->iocb_cnt;
3560 ha->iocb_cnt -= srb->iocb_cnt;
3561 if (srb->cmd)
3562 srb->cmd->host_scribble =
3563 (unsigned char *)(unsigned long) MAX_SRBS;
3565 return srb;
3569 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
3570 * @ha: Pointer to host adapter structure.
3571 * @cmd: Scsi Command to wait on.
3573 * This routine waits for the command to be returned by the Firmware
3574 * for up to a maximum wait time.
3576 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3577 struct scsi_cmnd *cmd)
3579 int done = 0;
3580 struct srb *rp;
3581 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
3582 int ret = SUCCESS;
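/* Poll roughly every two seconds, for up to EH_WAIT_CMD_TOV iterations,
 * waiting for CMD_SP(cmd) to be cleared when the firmware returns the
 * command. */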
3584 /* Don't wait on the command if a PCI error is being handled
3585 * by the PCI AER driver
3587 if (unlikely(pci_channel_offline(ha->pdev)) ||
3588 (test_bit(AF_EEH_BUSY, &ha->flags))) {
3589 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3590 ha->host_no, __func__);
3591 return ret;
3594 do {
3595 /* Check whether the command has been returned to the OS */
3596 rp = (struct srb *) CMD_SP(cmd);
3597 if (rp == NULL) {
3598 done++;
3599 break;
3602 msleep(2000);
3603 } while (max_wait_time--);
3605 return done;
3609 * qla4xxx_wait_for_hba_online - waits for HBA to come online
3610 * @ha: Pointer to host adapter structure
3612 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3614 unsigned long wait_online;
3616 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
3617 while (time_before(jiffies, wait_online)) {
3619 if (adapter_up(ha))
3620 return QLA_SUCCESS;
3622 msleep(2000);
3625 return QLA_ERROR;
3629 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
3630 * @ha: pointer to HBA
3631 * @stgt: pointer to the SCSI target to wait on
3632 * @sdev: pointer to the SCSI device; NULL means wait on the entire target
3634 * This function waits for all outstanding commands to a target or lun to
3635 * complete. It returns 0 if all pending commands are returned and 1 otherwise.
3637 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3638 struct scsi_target *stgt,
3639 struct scsi_device *sdev)
3641 int cnt;
3642 int status = 0;
3643 struct scsi_cmnd *cmd;
3646 * Waiting for all commands for the designated target or dev
3647 * in the active array
3649 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3650 cmd = scsi_host_find_tag(ha->host, cnt);
3651 if (cmd && stgt == scsi_target(cmd->device) &&
3652 (!sdev || sdev == cmd->device)) {
3653 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3654 status++;
3655 break;
3659 return status;
3663 * qla4xxx_eh_abort - callback for abort task.
3664 * @cmd: Pointer to Linux's SCSI command structure
3666 * This routine is called by the Linux OS to abort the specified
3667 * command.
3669 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3671 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3672 unsigned int id = cmd->device->id;
3673 unsigned int lun = cmd->device->lun;
3674 unsigned long flags;
3675 struct srb *srb = NULL;
3676 int ret = SUCCESS;
3677 int wait = 0;
3679 ql4_printk(KERN_INFO, ha,
3680 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3681 ha->host_no, id, lun, cmd);
3683 spin_lock_irqsave(&ha->hardware_lock, flags);
3684 srb = (struct srb *) CMD_SP(cmd);
3685 if (!srb) {
3686 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3687 return SUCCESS;
3689 kref_get(&srb->srb_ref);
3690 spin_unlock_irqrestore(&ha->hardware_lock, flags);
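/* Hold an srb reference across the abort mailbox command so the srb
 * cannot be freed underneath us; the matching kref_put() is below. */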
3692 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3693 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3694 ha->host_no, id, lun));
3695 ret = FAILED;
3696 } else {
3697 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3698 ha->host_no, id, lun));
3699 wait = 1;
3702 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3704 /* Wait for command to complete */
3705 if (wait) {
3706 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3707 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3708 ha->host_no, id, lun));
3709 ret = FAILED;
3713 ql4_printk(KERN_INFO, ha,
3714 "scsi%ld:%d:%d: Abort command - %s\n",
3715 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
3717 return ret;
3721 * qla4xxx_eh_device_reset - callback for device (lun) reset.
3722 * @cmd: Pointer to Linux's SCSI command structure
3724 * This routine is called by the Linux OS to reset the lun associated with
3725 * the specified command.
3727 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3729 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3730 struct ddb_entry *ddb_entry = cmd->device->hostdata;
3731 int ret = FAILED, stat;
3733 if (!ddb_entry)
3734 return ret;
3736 ret = iscsi_block_scsi_eh(cmd);
3737 if (ret)
3738 return ret;
3739 ret = FAILED;
3741 ql4_printk(KERN_INFO, ha,
3742 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3743 cmd->device->channel, cmd->device->id, cmd->device->lun);
3745 DEBUG2(printk(KERN_INFO
3746 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3747 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
3748 cmd, jiffies, cmd->request->timeout / HZ,
3749 ha->dpc_flags, cmd->result, cmd->allowed));
3751 /* FIXME: wait for hba to go online */
3752 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3753 if (stat != QLA_SUCCESS) {
3754 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
3755 goto eh_dev_reset_done;
3758 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3759 cmd->device)) {
3760 ql4_printk(KERN_INFO, ha,
3761 "DEVICE RESET FAILED - waiting for "
3762 "commands.\n");
3763 goto eh_dev_reset_done;
3766 /* Send marker. */
3767 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3768 MM_LUN_RESET) != QLA_SUCCESS)
3769 goto eh_dev_reset_done;
3771 ql4_printk(KERN_INFO, ha,
3772 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3773 ha->host_no, cmd->device->channel, cmd->device->id,
3774 cmd->device->lun);
3776 ret = SUCCESS;
3778 eh_dev_reset_done:
3780 return ret;
3784 * qla4xxx_eh_target_reset - callback for target reset.
3785 * @cmd: Pointer to Linux's SCSI command structure
3787 * This routine is called by the Linux OS to reset the target.
3789 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3791 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3792 struct ddb_entry *ddb_entry = cmd->device->hostdata;
3793 int stat, ret;
3795 if (!ddb_entry)
3796 return FAILED;
3798 ret = iscsi_block_scsi_eh(cmd);
3799 if (ret)
3800 return ret;
3802 starget_printk(KERN_INFO, scsi_target(cmd->device),
3803 "WARM TARGET RESET ISSUED.\n");
3805 DEBUG2(printk(KERN_INFO
3806 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3807 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
3808 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
3809 ha->dpc_flags, cmd->result, cmd->allowed));
3811 stat = qla4xxx_reset_target(ha, ddb_entry);
3812 if (stat != QLA_SUCCESS) {
3813 starget_printk(KERN_INFO, scsi_target(cmd->device),
3814 "WARM TARGET RESET FAILED.\n");
3815 return FAILED;
3818 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3819 NULL)) {
3820 starget_printk(KERN_INFO, scsi_target(cmd->device),
3821 "WARM TARGET DEVICE RESET FAILED - "
3822 "waiting for commands.\n");
3823 return FAILED;
3826 /* Send marker. */
3827 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3828 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3829 starget_printk(KERN_INFO, scsi_target(cmd->device),
3830 "WARM TARGET DEVICE RESET FAILED - "
3831 "marker iocb failed.\n");
3832 return FAILED;
3835 starget_printk(KERN_INFO, scsi_target(cmd->device),
3836 "WARM TARGET RESET SUCCEEDED.\n");
3837 return SUCCESS;
3841 * qla4xxx_eh_host_reset - kernel callback
3842 * @cmd: Pointer to Linux's SCSI command structure
3844 * This routine is invoked by the Linux kernel to perform fatal error
3845 * recovery on the specified adapter.
3847 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3849 int return_status = FAILED;
3850 struct scsi_qla_host *ha;
3852 ha = to_qla_host(cmd->device->host);
3854 if (ql4xdontresethba) {
3855 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3856 ha->host_no, __func__));
3857 return FAILED;
3860 ql4_printk(KERN_INFO, ha,
3861 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
3862 cmd->device->channel, cmd->device->id, cmd->device->lun);
3864 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3865 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
3866 "DEAD.\n", ha->host_no, cmd->device->channel,
3867 __func__));
3869 return FAILED;
3872 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3873 if (is_qla8022(ha))
3874 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3875 else
3876 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3879 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
3880 return_status = SUCCESS;
3882 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
3883 return_status == FAILED ? "FAILED" : "SUCCEEDED");
3885 return return_status;
3886 }
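/**
 * qla4xxx_context_reset - resets the firmware context without a full
 * adapter reset
 * @ha: Pointer to host adapter structure.
 *
 * Reads the primary ACB into a DMA buffer, disables the ACB, waits for
 * the disable to complete and then re-applies the saved ACB.
 **/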
3888 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3890 uint32_t mbox_cmd[MBOX_REG_COUNT];
3891 uint32_t mbox_sts[MBOX_REG_COUNT];
3892 struct addr_ctrl_blk_def *acb = NULL;
3893 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3894 int rval = QLA_SUCCESS;
3895 dma_addr_t acb_dma;
3897 acb = dma_alloc_coherent(&ha->pdev->dev,
3898 sizeof(struct addr_ctrl_blk_def),
3899 &acb_dma, GFP_KERNEL);
3900 if (!acb) {
3901 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3902 __func__);
3903 rval = -ENOMEM;
3904 goto exit_port_reset;
3907 memset(acb, 0, acb_len);
3909 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3910 if (rval != QLA_SUCCESS) {
3911 rval = -EIO;
3912 goto exit_free_acb;
3915 rval = qla4xxx_disable_acb(ha);
3916 if (rval != QLA_SUCCESS) {
3917 rval = -EIO;
3918 goto exit_free_acb;
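/* Give the firmware up to DISABLE_ACB_TOV seconds to complete the disable */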
3921 wait_for_completion_timeout(&ha->disable_acb_comp,
3922 DISABLE_ACB_TOV * HZ);
3924 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3925 if (rval != QLA_SUCCESS) {
3926 rval = -EIO;
3927 goto exit_free_acb;
3930 exit_free_acb:
3931 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3932 acb, acb_dma);
3933 exit_port_reset:
3934 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3935 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3936 return rval;
3937 }
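/**
 * qla4xxx_host_reset - handles a host (adapter or firmware) reset request
 * @shost: Pointer to the Scsi_Host to reset
 * @reset_type: SCSI_ADAPTER_RESET or SCSI_FIRMWARE_RESET
 *
 * Honours the ql4xdontresethba module parameter. SCSI_ADAPTER_RESET takes
 * the full adapter recovery path; SCSI_FIRMWARE_RESET resets only the
 * firmware context, via the recovery DPC on ISP82xx or through
 * qla4xxx_context_reset() on other ISPs.
 **/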
3939 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3941 struct scsi_qla_host *ha = to_qla_host(shost);
3942 int rval = QLA_SUCCESS;
3944 if (ql4xdontresethba) {
3945 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3946 __func__));
3947 rval = -EPERM;
3948 goto exit_host_reset;
3951 rval = qla4xxx_wait_for_hba_online(ha);
3952 if (rval != QLA_SUCCESS) {
3953 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3954 "adapter\n", __func__));
3955 rval = -EIO;
3956 goto exit_host_reset;
3959 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3960 goto recover_adapter;
3962 switch (reset_type) {
3963 case SCSI_ADAPTER_RESET:
3964 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3965 break;
3966 case SCSI_FIRMWARE_RESET:
3967 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3968 if (is_qla8022(ha))
3969 /* set firmware context reset */
3970 set_bit(DPC_RESET_HA_FW_CONTEXT,
3971 &ha->dpc_flags);
3972 else {
3973 rval = qla4xxx_context_reset(ha);
3974 goto exit_host_reset;
3977 break;
3980 recover_adapter:
3981 rval = qla4xxx_recover_adapter(ha);
3982 if (rval != QLA_SUCCESS) {
3983 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3984 __func__));
3985 rval = -EIO;
3988 exit_host_reset:
3989 return rval;
3992 /* PCI AER driver recovers from all correctable errors w/o
3993 * driver intervention. For uncorrectable errors PCI AER
3994 * driver calls the following device driver's callbacks
3995 *
3996 * - Fatal Errors - link_reset
3997 * - Non-Fatal Errors - driver's pci_error_detected() which
3998 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
3999 *
4000 * PCI AER driver calls
4001 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
4002 * returns RECOVERED or NEED_RESET if fw_hung
4003 * NEED_RESET - driver's slot_reset()
4004 * DISCONNECT - device is dead & cannot recover
4005 * RECOVERED - driver's pci_resume()
4006 */
4007 static pci_ers_result_t
4008 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4010 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4012 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
4013 ha->host_no, __func__, state);
4015 if (!is_aer_supported(ha))
4016 return PCI_ERS_RESULT_NONE;
4018 switch (state) {
4019 case pci_channel_io_normal:
4020 clear_bit(AF_EEH_BUSY, &ha->flags);
4021 return PCI_ERS_RESULT_CAN_RECOVER;
4022 case pci_channel_io_frozen:
4023 set_bit(AF_EEH_BUSY, &ha->flags);
4024 qla4xxx_mailbox_premature_completion(ha);
4025 qla4xxx_free_irqs(ha);
4026 pci_disable_device(pdev);
4027 /* Return back all IOs */
4028 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4029 return PCI_ERS_RESULT_NEED_RESET;
4030 case pci_channel_io_perm_failure:
4031 set_bit(AF_EEH_BUSY, &ha->flags);
4032 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
4033 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4034 return PCI_ERS_RESULT_DISCONNECT;
4036 return PCI_ERS_RESULT_NEED_RESET;
4039 /**
4040 * qla4xxx_pci_mmio_enabled() gets called if
4041 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
4042 * and read/write to the device still works.
4043 **/
4044 static pci_ers_result_t
4045 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
4047 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4049 if (!is_aer_supported(ha))
4050 return PCI_ERS_RESULT_NONE;
4052 return PCI_ERS_RESULT_RECOVERED;
4053 }
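/**
 * qla4_8xxx_error_recovery - recovers an ISP82xx adapter after a PCI error
 * @ha: Pointer to host adapter structure.
 *
 * Fails all iSCSI sessions and then checks whether a lower-numbered PCI
 * function of the same device is still enabled. If none is, this function
 * becomes the reset owner: it drives the IDC state to COLD, reinitializes
 * the adapter and marks the hardware READY (or FAILED). Otherwise it only
 * reinitializes itself once the reset owner has brought the device READY.
 **/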
4055 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
4057 uint32_t rval = QLA_ERROR;
4058 uint32_t ret = 0;
4059 int fn;
4060 struct pci_dev *other_pdev = NULL;
4062 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
4064 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4066 if (test_bit(AF_ONLINE, &ha->flags)) {
4067 clear_bit(AF_ONLINE, &ha->flags);
4068 clear_bit(AF_LINK_UP, &ha->flags);
4069 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4070 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4073 fn = PCI_FUNC(ha->pdev->devfn);
4074 while (fn > 0) {
4075 fn--;
4076 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
4077 "func %x\n", ha->host_no, __func__, fn);
4078 /* Get the pci device given the domain, bus,
4079 * slot/function number */
4080 other_pdev =
4081 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
4082 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
4083 fn));
4085 if (!other_pdev)
4086 continue;
4088 if (atomic_read(&other_pdev->enable_cnt)) {
4089 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
4090 "func in enabled state%x\n", ha->host_no,
4091 __func__, fn);
4092 pci_dev_put(other_pdev);
4093 break;
4095 pci_dev_put(other_pdev);
4098 /* The first function on the card, the reset owner will
4099 * start & initialize the firmware. The other functions
4100 * on the card will reset the firmware context
4101 */
4102 if (!fn) {
4103 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
4104 "0x%x is the owner\n", ha->host_no, __func__,
4105 ha->pdev->devfn);
4107 qla4_8xxx_idc_lock(ha);
4108 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4109 QLA82XX_DEV_COLD);
4111 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4112 QLA82XX_IDC_VERSION);
4114 qla4_8xxx_idc_unlock(ha);
4115 clear_bit(AF_FW_RECOVERY, &ha->flags);
4116 rval = qla4xxx_initialize_adapter(ha);
4117 qla4_8xxx_idc_lock(ha);
4119 if (rval != QLA_SUCCESS) {
4120 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4121 "FAILED\n", ha->host_no, __func__);
4122 qla4_8xxx_clear_drv_active(ha);
4123 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4124 QLA82XX_DEV_FAILED);
4125 } else {
4126 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4127 "READY\n", ha->host_no, __func__);
4128 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4129 QLA82XX_DEV_READY);
4130 /* Clear driver state register */
4131 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4132 qla4_8xxx_set_drv_active(ha);
4133 ret = qla4xxx_request_irqs(ha);
4134 if (ret) {
4135 ql4_printk(KERN_WARNING, ha, "Failed to "
4136 "reserve interrupt %d already in use.\n",
4137 ha->pdev->irq);
4138 rval = QLA_ERROR;
4139 } else {
4140 ha->isp_ops->enable_intrs(ha);
4141 rval = QLA_SUCCESS;
4144 qla4_8xxx_idc_unlock(ha);
4145 } else {
4146 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
4147 "the reset owner\n", ha->host_no, __func__,
4148 ha->pdev->devfn);
4149 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4150 QLA82XX_DEV_READY)) {
4151 clear_bit(AF_FW_RECOVERY, &ha->flags);
4152 rval = qla4xxx_initialize_adapter(ha);
4153 if (rval == QLA_SUCCESS) {
4154 ret = qla4xxx_request_irqs(ha);
4155 if (ret) {
4156 ql4_printk(KERN_WARNING, ha, "Failed to"
4157 " reserve interrupt %d already in"
4158 " use.\n", ha->pdev->irq);
4159 rval = QLA_ERROR;
4160 } else {
4161 ha->isp_ops->enable_intrs(ha);
4162 rval = QLA_SUCCESS;
4165 qla4_8xxx_idc_lock(ha);
4166 qla4_8xxx_set_drv_active(ha);
4167 qla4_8xxx_idc_unlock(ha);
4170 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4171 return rval;
4172 }
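/**
 * qla4xxx_pci_slot_reset - PCI error recovery callback after slot reset
 * @pdev: PCI device that was reset
 *
 * Restores and re-saves the PCI state, re-enables the device and, for
 * ISP82xx, runs qla4_8xxx_error_recovery() to bring the firmware back up.
 **/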
4174 static pci_ers_result_t
4175 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
4177 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4178 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4179 int rc;
4181 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
4182 ha->host_no, __func__);
4184 if (!is_aer_supported(ha))
4185 return PCI_ERS_RESULT_NONE;
4187 /* Restore the saved state of PCIe device -
4188 * BAR registers, PCI Config space, PCIX, MSI,
4189 * IOV states
4190 */
4191 pci_restore_state(pdev);
4193 /* pci_restore_state() clears the saved_state flag of the device,
4194 * so save the state again to set the saved_state flag back
4195 */
4196 pci_save_state(pdev);
4198 /* Initialize device or resume if in suspended state */
4199 rc = pci_enable_device(pdev);
4200 if (rc) {
4201 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
4202 "device after reset\n", ha->host_no, __func__);
4203 goto exit_slot_reset;
4206 ha->isp_ops->disable_intrs(ha);
4208 if (is_qla8022(ha)) {
4209 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
4210 ret = PCI_ERS_RESULT_RECOVERED;
4211 goto exit_slot_reset;
4212 } else
4213 goto exit_slot_reset;
4216 exit_slot_reset:
4217 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
4218 ha->host_no, __func__, ret);
4219 return ret;
4220 }
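/**
 * qla4xxx_pci_resume - PCI error recovery callback to resume normal I/O
 * @pdev: PCI device being resumed
 *
 * Waits for the HBA to come back online, clears the uncorrectable AER
 * error status and drops AF_EEH_BUSY so commands are accepted again.
 **/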
4222 static void
4223 qla4xxx_pci_resume(struct pci_dev *pdev)
4225 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4226 int ret;
4228 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
4229 ha->host_no, __func__);
4231 ret = qla4xxx_wait_for_hba_online(ha);
4232 if (ret != QLA_SUCCESS) {
4233 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
4234 "resume I/O from slot/link_reset\n", ha->host_no,
4235 __func__);
4238 pci_cleanup_aer_uncorrect_error_status(pdev);
4239 clear_bit(AF_EEH_BUSY, &ha->flags);
4240 }
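/* PCI AER error recovery callbacks, hooked up via qla4xxx_pci_driver */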
4242 static struct pci_error_handlers qla4xxx_err_handler = {
4243 .error_detected = qla4xxx_pci_error_detected,
4244 .mmio_enabled = qla4xxx_pci_mmio_enabled,
4245 .slot_reset = qla4xxx_pci_slot_reset,
4246 .resume = qla4xxx_pci_resume,
4247 };
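/* PCI IDs claimed by this driver: ISP4010, ISP4022, ISP4032 and ISP8022 */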
4249 static struct pci_device_id qla4xxx_pci_tbl[] = {
4250 {
4251 .vendor = PCI_VENDOR_ID_QLOGIC,
4252 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
4253 .subvendor = PCI_ANY_ID,
4254 .subdevice = PCI_ANY_ID,
4255 },
4256 {
4257 .vendor = PCI_VENDOR_ID_QLOGIC,
4258 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
4259 .subvendor = PCI_ANY_ID,
4260 .subdevice = PCI_ANY_ID,
4261 },
4262 {
4263 .vendor = PCI_VENDOR_ID_QLOGIC,
4264 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
4265 .subvendor = PCI_ANY_ID,
4266 .subdevice = PCI_ANY_ID,
4267 },
4268 {
4269 .vendor = PCI_VENDOR_ID_QLOGIC,
4270 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
4271 .subvendor = PCI_ANY_ID,
4272 .subdevice = PCI_ANY_ID,
4273 },
4274 {0, 0},
4275 };
4276 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
4278 static struct pci_driver qla4xxx_pci_driver = {
4279 .name = DRIVER_NAME,
4280 .id_table = qla4xxx_pci_tbl,
4281 .probe = qla4xxx_probe_adapter,
4282 .remove = qla4xxx_remove_adapter,
4283 .err_handler = &qla4xxx_err_handler,
4284 };
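/**
 * qla4xxx_module_init - module load entry point
 *
 * Creates the SRB slab cache, derives the version string, registers the
 * iSCSI transport and finally the PCI driver; each step is unwound in
 * reverse order on failure.
 **/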
4286 static int __init qla4xxx_module_init(void)
4288 int ret;
4290 /* Allocate cache for SRBs. */
4291 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
4292 SLAB_HWCACHE_ALIGN, NULL);
4293 if (srb_cachep == NULL) {
4294 printk(KERN_ERR
4295 "%s: Unable to allocate SRB cache..."
4296 "Failing load!\n", DRIVER_NAME);
4297 ret = -ENOMEM;
4298 goto no_srp_cache;
4301 /* Derive version string. */
4302 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
4303 if (ql4xextended_error_logging)
4304 strcat(qla4xxx_version_str, "-debug");
4306 qla4xxx_scsi_transport =
4307 iscsi_register_transport(&qla4xxx_iscsi_transport);
4308 if (!qla4xxx_scsi_transport) {
4309 ret = -ENODEV;
4310 goto release_srb_cache;
4313 ret = pci_register_driver(&qla4xxx_pci_driver);
4314 if (ret)
4315 goto unregister_transport;
4317 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
4318 return 0;
4320 unregister_transport:
4321 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4322 release_srb_cache:
4323 kmem_cache_destroy(srb_cachep);
4324 no_srp_cache:
4325 return ret;
4326 }
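/**
 * qla4xxx_module_exit - module unload entry point
 *
 * Unregisters the PCI driver and the iSCSI transport and destroys the
 * SRB slab cache, in the reverse order of qla4xxx_module_init().
 **/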
4328 static void __exit qla4xxx_module_exit(void)
4330 pci_unregister_driver(&qla4xxx_pci_driver);
4331 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4332 kmem_cache_destroy(srb_cachep);
4333 }
4335 module_init(qla4xxx_module_init);
4336 module_exit(qla4xxx_module_exit);
4338 MODULE_AUTHOR("QLogic Corporation");
4339 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
4340 MODULE_LICENSE("GPL");
4341 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);