/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */
/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI4_C);
33 static int emlxs_sli4_init_extents(emlxs_hba_t
*hba
,
35 static uint32_t emlxs_sli4_read_status(emlxs_hba_t
*hba
);
37 static int emlxs_init_bootstrap_mb(emlxs_hba_t
*hba
);
39 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t
*hba
);
41 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t
*hba
);
43 static void emlxs_sli4_write_mbdb(emlxs_hba_t
*hba
, uint32_t value
);
45 static void emlxs_sli4_write_wqdb(emlxs_hba_t
*hba
, uint32_t value
);
47 static void emlxs_sli4_write_mqdb(emlxs_hba_t
*hba
, uint32_t value
);
49 static void emlxs_sli4_write_rqdb(emlxs_hba_t
*hba
, uint32_t value
);
51 static void emlxs_sli4_write_cqdb(emlxs_hba_t
*hba
, uint32_t value
);
53 static int emlxs_sli4_create_queues(emlxs_hba_t
*hba
,
55 static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t
*hba
,
57 static int emlxs_sli4_post_sgl_pages(emlxs_hba_t
*hba
,
60 static int emlxs_sli4_read_eq(emlxs_hba_t
*hba
, EQ_DESC_t
*eq
);
62 static int emlxs_sli4_map_hdw(emlxs_hba_t
*hba
);
64 static void emlxs_sli4_unmap_hdw(emlxs_hba_t
*hba
);
66 static int32_t emlxs_sli4_online(emlxs_hba_t
*hba
);
68 static void emlxs_sli4_offline(emlxs_hba_t
*hba
,
69 uint32_t reset_requested
);
71 static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t
*hba
, uint32_t restart
,
72 uint32_t skip_post
, uint32_t quiesce
);
73 static void emlxs_sli4_hba_kill(emlxs_hba_t
*hba
);
75 static uint32_t emlxs_sli4_hba_init(emlxs_hba_t
*hba
);
77 static uint32_t emlxs_sli4_bde_setup(emlxs_port_t
*port
,
80 static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t
*hba
,
81 CHANNEL
*cp
, IOCBQ
*iocb_cmd
);
82 static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t
*hba
,
83 MAILBOXQ
*mbq
, int32_t flg
, uint32_t tmo
);
84 static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t
*hba
,
85 MAILBOXQ
*mbq
, int32_t flg
, uint32_t tmo
);
87 static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t
*port
,
88 emlxs_buf_t
*cmd_sbp
, int channel
);
89 static uint32_t emlxs_sli4_fct_bde_setup(emlxs_port_t
*port
,
91 #endif /* SFCT_SUPPORT */
93 static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t
*port
,
94 emlxs_buf_t
*sbp
, int ring
);
95 static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t
*port
,
97 static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t
*port
,
99 static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t
*port
,
101 static void emlxs_sli4_poll_intr(emlxs_hba_t
*hba
);
102 static int32_t emlxs_sli4_intx_intr(char *arg
);
105 static uint32_t emlxs_sli4_msi_intr(char *arg1
, char *arg2
);
106 #endif /* MSI_SUPPORT */
108 static void emlxs_sli4_resource_free(emlxs_hba_t
*hba
);
110 static int emlxs_sli4_resource_alloc(emlxs_hba_t
*hba
);
111 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t
*hba
);
113 static XRIobj_t
*emlxs_sli4_alloc_xri(emlxs_port_t
*port
,
114 emlxs_buf_t
*sbp
, RPIobj_t
*rpip
,
116 static void emlxs_sli4_enable_intr(emlxs_hba_t
*hba
);
118 static void emlxs_sli4_disable_intr(emlxs_hba_t
*hba
, uint32_t att
);
120 static void emlxs_sli4_timer(emlxs_hba_t
*hba
);
122 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t
*hba
);
124 static void emlxs_sli4_poll_erratt(emlxs_hba_t
*hba
);
126 extern XRIobj_t
*emlxs_sli4_reserve_xri(emlxs_port_t
*port
,
127 RPIobj_t
*rpip
, uint32_t type
, uint16_t rx_id
);
128 static int emlxs_check_hdw_ready(emlxs_hba_t
*);
130 static uint32_t emlxs_sli4_reg_did(emlxs_port_t
*port
,
131 uint32_t did
, SERV_PARM
*param
,
132 emlxs_buf_t
*sbp
, fc_unsol_buf_t
*ubp
,
135 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t
*port
,
136 emlxs_node_t
*node
, emlxs_buf_t
*sbp
,
137 fc_unsol_buf_t
*ubp
, IOCBQ
*iocbq
);
139 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t
*hba
,
141 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t
*hba
,
145 static uint16_t emlxs_sli4_rqid_to_index(emlxs_hba_t
*hba
,
147 static uint16_t emlxs_sli4_wqid_to_index(emlxs_hba_t
*hba
,
149 static uint16_t emlxs_sli4_cqid_to_index(emlxs_hba_t
*hba
,
152 /* Define SLI4 API functions */
153 emlxs_sli_api_t emlxs_sli4_api
= {
155 emlxs_sli4_unmap_hdw
,
158 emlxs_sli4_hba_reset
,
160 emlxs_sli4_issue_iocb_cmd
,
161 emlxs_sli4_issue_mbox_cmd
,
163 emlxs_sli4_prep_fct_iocb
,
166 #endif /* SFCT_SUPPORT */
167 emlxs_sli4_prep_fcp_iocb
,
168 emlxs_sli4_prep_ip_iocb
,
169 emlxs_sli4_prep_els_iocb
,
170 emlxs_sli4_prep_ct_iocb
,
171 emlxs_sli4_poll_intr
,
172 emlxs_sli4_intx_intr
,
174 emlxs_sli4_disable_intr
,
176 emlxs_sli4_poll_erratt
,
178 emlxs_sli4_unreg_node
182 /* ************************************************************************** */
185 emlxs_sli4_set_default_params(emlxs_hba_t
*hba
)
187 emlxs_port_t
*port
= &PPORT
;
189 bzero((char *)&hba
->sli
.sli4
.param
, sizeof (sli_params_t
));
191 hba
->sli
.sli4
.param
.ProtocolType
= 0x3; /* FC/FCoE */
193 hba
->sli
.sli4
.param
.SliHint2
= 0;
194 hba
->sli
.sli4
.param
.SliHint1
= 0;
195 hba
->sli
.sli4
.param
.IfType
= 0;
196 hba
->sli
.sli4
.param
.SliFamily
= 0;
197 hba
->sli
.sli4
.param
.Revision
= 0x4; /* SLI4 */
198 hba
->sli
.sli4
.param
.FT
= 0;
200 hba
->sli
.sli4
.param
.EqeCntMethod
= 0x1; /* Bit pattern */
201 hba
->sli
.sli4
.param
.EqPageSize
= 0x1; /* 4096 */
202 hba
->sli
.sli4
.param
.EqeSize
= 0x1; /* 4 byte */
203 hba
->sli
.sli4
.param
.EqPageCnt
= 8;
204 hba
->sli
.sli4
.param
.EqeCntMask
= 0x1F; /* 256-4096 elements */
206 hba
->sli
.sli4
.param
.CqeCntMethod
= 0x1; /* Bit pattern */
207 hba
->sli
.sli4
.param
.CqPageSize
= 0x1; /* 4096 */
208 hba
->sli
.sli4
.param
.CQV
= 0;
209 hba
->sli
.sli4
.param
.CqeSize
= 0x3; /* 16 byte */
210 hba
->sli
.sli4
.param
.CqPageCnt
= 4;
211 hba
->sli
.sli4
.param
.CqeCntMask
= 0x70; /* 256-1024 elements */
213 hba
->sli
.sli4
.param
.MqeCntMethod
= 0x1; /* Bit pattern */
214 hba
->sli
.sli4
.param
.MqPageSize
= 0x1; /* 4096 */
215 hba
->sli
.sli4
.param
.MQV
= 0;
216 hba
->sli
.sli4
.param
.MqPageCnt
= 8;
217 hba
->sli
.sli4
.param
.MqeCntMask
= 0x0F; /* 16-128 elements */
219 hba
->sli
.sli4
.param
.WqeCntMethod
= 0; /* Page Count */
220 hba
->sli
.sli4
.param
.WqPageSize
= 0x1; /* 4096 */
221 hba
->sli
.sli4
.param
.WQV
= 0;
222 hba
->sli
.sli4
.param
.WqeSize
= 0x5; /* 64 byte */
223 hba
->sli
.sli4
.param
.WqPageCnt
= 4;
224 hba
->sli
.sli4
.param
.WqeCntMask
= 0x10; /* 256 elements */
226 hba
->sli
.sli4
.param
.RqeCntMethod
= 0; /* Page Count */
227 hba
->sli
.sli4
.param
.RqPageSize
= 0x1; /* 4096 */
228 hba
->sli
.sli4
.param
.RQV
= 0;
229 hba
->sli
.sli4
.param
.RqeSize
= 0x2; /* 8 byte */
230 hba
->sli
.sli4
.param
.RqPageCnt
= 8;
231 hba
->sli
.sli4
.param
.RqDbWin
= 1;
232 hba
->sli
.sli4
.param
.RqeCntMask
= 0x100; /* 4096 elements */
234 hba
->sli
.sli4
.param
.Loopback
= 0xf; /* unsupported */
235 hba
->sli
.sli4
.param
.PHWQ
= 0;
236 hba
->sli
.sli4
.param
.PHON
= 0;
237 hba
->sli
.sli4
.param
.TRIR
= 0;
238 hba
->sli
.sli4
.param
.TRTY
= 0;
239 hba
->sli
.sli4
.param
.TCCA
= 0;
240 hba
->sli
.sli4
.param
.MWQE
= 0;
241 hba
->sli
.sli4
.param
.ASSI
= 0;
242 hba
->sli
.sli4
.param
.TERP
= 0;
243 hba
->sli
.sli4
.param
.TGT
= 0;
244 hba
->sli
.sli4
.param
.AREG
= 0;
245 hba
->sli
.sli4
.param
.FBRR
= 0;
246 hba
->sli
.sli4
.param
.SGLR
= 1;
247 hba
->sli
.sli4
.param
.HDRR
= 1;
248 hba
->sli
.sli4
.param
.EXT
= 0;
249 hba
->sli
.sli4
.param
.FCOE
= 1;
251 hba
->sli
.sli4
.param
.SgeLength
= (64 * 1024);
252 hba
->sli
.sli4
.param
.SglAlign
= 0x7 /* 4096 */;
253 hba
->sli
.sli4
.param
.SglPageSize
= 0x1; /* 4096 */
254 hba
->sli
.sli4
.param
.SglPageCnt
= 2;
256 hba
->sli
.sli4
.param
.MinRqSize
= 128;
257 hba
->sli
.sli4
.param
.MaxRqSize
= 2048;
259 hba
->sli
.sli4
.param
.RPIMax
= 0x3ff;
260 hba
->sli
.sli4
.param
.XRIMax
= 0x3ff;
261 hba
->sli
.sli4
.param
.VFIMax
= 0xff;
262 hba
->sli
.sli4
.param
.VPIMax
= 0xff;
264 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
265 "Default SLI4 parameters set.");
267 } /* emlxs_sli4_set_default_params() */
271 * emlxs_sli4_online()
273 * This routine will start initialization of the SLI4 HBA.
276 emlxs_sli4_online(emlxs_hba_t
*hba
)
278 emlxs_port_t
*port
= &PPORT
;
281 MAILBOXQ
*mbq
= NULL
;
292 uint32_t kern_update
= 0;
293 emlxs_firmware_t hba_fw
;
294 emlxs_firmware_t
*fw
;
301 sli_mode
= EMLXS_HBA_SLI4_MODE
;
302 hba
->sli_mode
= sli_mode
;
304 /* Set the fw_check flag */
305 fw_check
= cfg
[CFG_FW_CHECK
].current
;
307 if ((fw_check
& 0x04) ||
308 (hba
->fw_flag
& FW_UPDATE_KERNEL
)) {
312 hba
->mbox_queue_flag
= 0;
313 hba
->fc_edtov
= FF_DEF_EDTOV
;
314 hba
->fc_ratov
= FF_DEF_RATOV
;
315 hba
->fc_altov
= FF_DEF_ALTOV
;
316 hba
->fc_arbtov
= FF_DEF_ARBTOV
;
318 /* Networking not supported */
319 if (cfg
[CFG_NETWORK_ON
].current
) {
320 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_msg
,
321 "Networking is not supported in SLI4, turning it off");
322 cfg
[CFG_NETWORK_ON
].current
= 0;
325 hba
->chan_count
= hba
->intr_count
* cfg
[CFG_NUM_WQ
].current
;
326 if (hba
->chan_count
> MAX_CHANNEL
) {
327 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
328 "Max channels exceeded, dropping num-wq from %d to 1",
329 cfg
[CFG_NUM_WQ
].current
);
330 cfg
[CFG_NUM_WQ
].current
= 1;
331 hba
->chan_count
= hba
->intr_count
* cfg
[CFG_NUM_WQ
].current
;
333 hba
->channel_fcp
= 0; /* First channel */
335 /* Default channel for everything else is the last channel */
336 hba
->channel_ip
= hba
->chan_count
- 1;
337 hba
->channel_els
= hba
->chan_count
- 1;
338 hba
->channel_ct
= hba
->chan_count
- 1;
342 hba
->channel_tx_count
= 0;
344 /* Initialize the local dump region buffer */
345 bzero(&hba
->sli
.sli4
.dump_region
, sizeof (MBUF_INFO
));
346 hba
->sli
.sli4
.dump_region
.size
= EMLXS_DUMP_REGION_SIZE
;
347 hba
->sli
.sli4
.dump_region
.flags
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
349 hba
->sli
.sli4
.dump_region
.align
= ddi_ptob(hba
->dip
, 1L);
351 (void) emlxs_mem_alloc(hba
, &hba
->sli
.sli4
.dump_region
);
353 if (hba
->sli
.sli4
.dump_region
.virt
== NULL
) {
354 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
355 "Unable to allocate dump region buffer.");
361 * Get a buffer which will be used repeatedly for mailbox commands
363 mbq
= (MAILBOXQ
*) kmem_zalloc((sizeof (MAILBOXQ
)), KM_SLEEP
);
365 mb
= (MAILBOX4
*)mbq
;
368 /* Reset & Initialize the adapter */
369 if (emlxs_sli4_hba_init(hba
)) {
370 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
371 "Unable to init hba.");
378 /* Access handle validation */
379 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
380 case SLI_INTF_IF_TYPE_2
:
381 if ((emlxs_fm_check_acc_handle(hba
,
382 hba
->pci_acc_handle
) != DDI_FM_OK
) ||
383 (emlxs_fm_check_acc_handle(hba
,
384 hba
->sli
.sli4
.bar0_acc_handle
) != DDI_FM_OK
)) {
385 EMLXS_MSGF(EMLXS_CONTEXT
,
386 &emlxs_invalid_access_handle_msg
, NULL
);
394 if ((emlxs_fm_check_acc_handle(hba
,
395 hba
->pci_acc_handle
) != DDI_FM_OK
) ||
396 (emlxs_fm_check_acc_handle(hba
,
397 hba
->sli
.sli4
.bar1_acc_handle
) != DDI_FM_OK
) ||
398 (emlxs_fm_check_acc_handle(hba
,
399 hba
->sli
.sli4
.bar2_acc_handle
) != DDI_FM_OK
)) {
400 EMLXS_MSGF(EMLXS_CONTEXT
,
401 &emlxs_invalid_access_handle_msg
, NULL
);
408 #endif /* FMA_SUPPORT */
411 * Setup and issue mailbox READ REV command
414 vpd
->postKernRev
= 0;
420 vpd
->postKernName
[0] = 0;
421 vpd
->opFwName
[0] = 0;
422 vpd
->sli1FwName
[0] = 0;
423 vpd
->sli2FwName
[0] = 0;
424 vpd
->sli3FwName
[0] = 0;
425 vpd
->sli4FwName
[0] = 0;
427 vpd
->opFwLabel
[0] = 0;
428 vpd
->sli1FwLabel
[0] = 0;
429 vpd
->sli2FwLabel
[0] = 0;
430 vpd
->sli3FwLabel
[0] = 0;
431 vpd
->sli4FwLabel
[0] = 0;
433 EMLXS_STATE_CHANGE(hba
, FC_INIT_REV
);
435 emlxs_mb_get_sli4_params(hba
, mbq
);
436 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
437 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
438 "Unable to read parameters. Mailbox cmd=%x status=%x",
439 mb
->mbxCommand
, mb
->mbxStatus
);
441 /* Set param defaults */
442 emlxs_sli4_set_default_params(hba
);
445 /* Save parameters */
446 bcopy((char *)&mb
->un
.varSLIConfig
.payload
,
447 (char *)&hba
->sli
.sli4
.param
, sizeof (sli_params_t
));
449 emlxs_data_dump(port
, "SLI_PARMS",
450 (uint32_t *)&hba
->sli
.sli4
.param
,
451 sizeof (sli_params_t
), 0);
454 /* Reuse mbq from previous mbox */
455 bzero(mbq
, sizeof (MAILBOXQ
));
457 emlxs_mb_get_port_name(hba
, mbq
);
458 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
459 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
460 "Unable to get port names. Mailbox cmd=%x status=%x",
461 mb
->mbxCommand
, mb
->mbxStatus
);
463 bzero(hba
->sli
.sli4
.port_name
,
464 sizeof (hba
->sli
.sli4
.port_name
));
466 /* Save port names */
467 bcopy((char *)&mb
->un
.varSLIConfig
.payload
,
468 (char *)&hba
->sli
.sli4
.port_name
,
469 sizeof (hba
->sli
.sli4
.port_name
));
472 /* Reuse mbq from previous mbox */
473 bzero(mbq
, sizeof (MAILBOXQ
));
475 emlxs_mb_read_rev(hba
, mbq
, 0);
476 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
477 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
478 "Unable to read rev. Mailbox cmd=%x status=%x",
479 mb
->mbxCommand
, mb
->mbxStatus
);
486 emlxs_data_dump(port
, "RD_REV", (uint32_t *)mb
, 18, 0);
487 if (mb
->un
.varRdRev4
.sliLevel
!= 4) {
488 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
489 "Invalid read rev Version for SLI4: 0x%x",
490 mb
->un
.varRdRev4
.sliLevel
);
496 switch (mb
->un
.varRdRev4
.dcbxMode
) {
497 case EMLXS_DCBX_MODE_CIN
: /* Mapped to nonFIP mode */
498 hba
->flag
&= ~FC_FIP_SUPPORTED
;
501 case EMLXS_DCBX_MODE_CEE
: /* Mapped to FIP mode */
502 hba
->flag
|= FC_FIP_SUPPORTED
;
506 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
507 "Invalid read rev dcbx mode for SLI4: 0x%x",
508 mb
->un
.varRdRev4
.dcbxMode
);
514 /* Set FC/FCoE mode */
515 if (mb
->un
.varRdRev4
.FCoE
) {
516 hba
->sli
.sli4
.flag
|= EMLXS_SLI4_FCOE_MODE
;
518 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_FCOE_MODE
;
521 /* Save information as VPD data */
524 vpd
->sli4FwRev
= (mb
->un
.varRdRev4
.ULPFwId
);
525 bcopy((char *)mb
->un
.varRdRev4
.ULPFwName
, vpd
->sli4FwName
, 16);
527 vpd
->opFwRev
= (mb
->un
.varRdRev4
.ULPFwId
);
528 bcopy((char *)mb
->un
.varRdRev4
.ULPFwName
, vpd
->opFwName
, 16);
530 vpd
->postKernRev
= (mb
->un
.varRdRev4
.ARMFwId
);
531 bcopy((char *)mb
->un
.varRdRev4
.ARMFwName
, vpd
->postKernName
, 16);
533 vpd
->biuRev
= mb
->un
.varRdRev4
.HwRev1
;
534 vpd
->fcphHigh
= mb
->un
.varRdRev4
.fcphHigh
;
535 vpd
->fcphLow
= mb
->un
.varRdRev4
.fcphLow
;
536 vpd
->feaLevelHigh
= mb
->un
.varRdRev4
.feaLevelHigh
;
537 vpd
->feaLevelLow
= mb
->un
.varRdRev4
.feaLevelLow
;
539 /* Decode FW labels */
540 if (hba
->model_info
.chip
== EMLXS_LANCER_CHIP
) {
541 bcopy(vpd
->postKernName
, vpd
->sli4FwName
, 16);
543 emlxs_decode_label(vpd
->sli4FwName
, vpd
->sli4FwName
, 0,
544 sizeof (vpd
->sli4FwName
));
545 emlxs_decode_label(vpd
->opFwName
, vpd
->opFwName
, 0,
546 sizeof (vpd
->opFwName
));
547 emlxs_decode_label(vpd
->postKernName
, vpd
->postKernName
, 0,
548 sizeof (vpd
->postKernName
));
550 if (hba
->model_info
.chip
== EMLXS_BE2_CHIP
) {
551 (void) strlcpy(vpd
->sli4FwLabel
, "be2.ufi",
552 sizeof (vpd
->sli4FwLabel
));
553 } else if (hba
->model_info
.chip
== EMLXS_BE3_CHIP
) {
554 (void) strlcpy(vpd
->sli4FwLabel
, "be3.ufi",
555 sizeof (vpd
->sli4FwLabel
));
556 } else if (hba
->model_info
.chip
== EMLXS_LANCER_CHIP
) {
557 (void) strlcpy(vpd
->sli4FwLabel
, "xe201.grp",
558 sizeof (vpd
->sli4FwLabel
));
560 (void) strlcpy(vpd
->sli4FwLabel
, "sli4.fw",
561 sizeof (vpd
->sli4FwLabel
));
564 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
565 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
566 vpd
->opFwRev
, vpd
->opFwName
, vpd
->postKernRev
, vpd
->postKernName
,
567 vpd
->fcphHigh
, vpd
->fcphLow
, vpd
->feaLevelHigh
, vpd
->feaLevelLow
,
568 mb
->un
.varRdRev4
.dcbxMode
);
570 /* No key information is needed for SLI4 products */
572 /* Get adapter VPD information */
573 vpd
->port_index
= (uint32_t)-1;
575 /* Reuse mbq from previous mbox */
576 bzero(mbq
, sizeof (MAILBOXQ
));
578 emlxs_mb_dump_vpd(hba
, mbq
, 0);
579 vpd_data
= hba
->sli
.sli4
.dump_region
.virt
;
581 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
583 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
584 "No VPD found. status=%x", mb
->mbxStatus
);
586 EMLXS_MSGF(EMLXS_CONTEXT
,
587 &emlxs_init_debug_msg
,
588 "VPD dumped. rsp_cnt=%d status=%x",
589 mb
->un
.varDmp4
.rsp_cnt
, mb
->mbxStatus
);
591 if (mb
->un
.varDmp4
.rsp_cnt
) {
592 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.dump_region
.dma_handle
,
593 0, mb
->un
.varDmp4
.rsp_cnt
, DDI_DMA_SYNC_FORKERNEL
);
596 if (hba
->sli
.sli4
.dump_region
.dma_handle
) {
597 if (emlxs_fm_check_dma_handle(hba
,
598 hba
->sli
.sli4
.dump_region
.dma_handle
)
600 EMLXS_MSGF(EMLXS_CONTEXT
,
601 &emlxs_invalid_dma_handle_msg
,
602 "sli4_online: hdl=%p",
603 hba
->sli
.sli4
.dump_region
.
609 #endif /* FMA_SUPPORT */
615 (void) emlxs_parse_vpd(hba
, (uint8_t *)vpd_data
,
616 mb
->un
.varDmp4
.rsp_cnt
);
619 * If there is a VPD part number, and it does not
620 * match the current default HBA model info,
621 * replace the default data with an entry that
624 * After emlxs_parse_vpd model holds the VPD value
625 * for V2 and part_num hold the value for PN. These
626 * 2 values are NOT necessarily the same.
630 if ((vpd
->model
[0] != 0) &&
631 (strcmp(&vpd
->model
[0], hba
->model_info
.model
) != 0)) {
633 /* First scan for a V2 match */
635 for (i
= 1; i
< emlxs_pci_model_count
; i
++) {
636 if (strcmp(&vpd
->model
[0],
637 emlxs_pci_model
[i
].model
) == 0) {
638 bcopy(&emlxs_pci_model
[i
],
640 sizeof (emlxs_model_t
));
647 if (!rval
&& (vpd
->part_num
[0] != 0) &&
648 (strcmp(&vpd
->part_num
[0], hba
->model_info
.model
) != 0)) {
650 /* Next scan for a PN match */
652 for (i
= 1; i
< emlxs_pci_model_count
; i
++) {
653 if (strcmp(&vpd
->part_num
[0],
654 emlxs_pci_model
[i
].model
) == 0) {
655 bcopy(&emlxs_pci_model
[i
],
657 sizeof (emlxs_model_t
));
663 /* HP CNA port indices start at 1 instead of 0 */
664 if (hba
->model_info
.chip
& EMLXS_BE_CHIPS
) {
665 ssvid
= ddi_get16(hba
->pci_acc_handle
,
666 (uint16_t *)(hba
->pci_addr
+ PCI_SSVID_REGISTER
));
668 if ((ssvid
== PCI_SSVID_HP
) && (vpd
->port_index
> 0)) {
674 * Now lets update hba->model_info with the real
679 * Replace the default model description with vpd data
681 if (vpd
->model_desc
[0] != 0) {
682 (void) strncpy(hba
->model_info
.model_desc
,
684 (sizeof (hba
->model_info
.model_desc
)-1));
687 /* Replace the default model with vpd data */
688 if (vpd
->model
[0] != 0) {
689 (void) strncpy(hba
->model_info
.model
, vpd
->model
,
690 (sizeof (hba
->model_info
.model
)-1));
693 /* Replace the default program types with vpd data */
694 if (vpd
->prog_types
[0] != 0) {
695 emlxs_parse_prog_types(hba
, vpd
->prog_types
);
700 * Since the adapter model may have changed with the vpd data
701 * lets double check if adapter is not supported
703 if (hba
->model_info
.flags
& EMLXS_NOT_SUPPORTED
) {
704 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
705 "Unsupported adapter found. "
706 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
707 hba
->model_info
.id
, hba
->model_info
.device_id
,
708 hba
->model_info
.ssdid
, hba
->model_info
.model
);
714 (void) strncpy(vpd
->boot_version
, vpd
->sli4FwName
,
715 (sizeof (vpd
->boot_version
)-1));
717 /* Get fcode version property */
718 emlxs_get_fcode_version(hba
);
720 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
721 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd
->postKernRev
,
722 vpd
->opFwRev
, vpd
->sli1FwRev
);
724 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
725 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd
->sli2FwRev
,
726 vpd
->sli3FwRev
, vpd
->sli4FwRev
, vpd
->feaLevelHigh
);
728 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
729 "BIOS: boot=%s fcode=%s", vpd
->boot_version
, vpd
->fcode_version
);
732 * If firmware checking is enabled and the adapter model indicates
733 * a firmware image, then perform firmware version check
738 if (((fw_check
& 0x1) &&
739 (hba
->model_info
.flags
& EMLXS_ORACLE_BRANDED
) &&
740 hba
->model_info
.fwid
) ||
741 ((fw_check
& 0x2) && hba
->model_info
.fwid
)) {
743 /* Find firmware image indicated by adapter model */
745 for (i
= 0; i
< emlxs_fw_count
; i
++) {
746 if (emlxs_fw_table
[i
].id
== hba
->model_info
.fwid
) {
747 fw
= &emlxs_fw_table
[i
];
753 * If the image was found, then verify current firmware
754 * versions of adapter
757 /* Obtain current firmware version info */
758 if (hba
->model_info
.chip
& EMLXS_BE_CHIPS
) {
759 (void) emlxs_be_read_fw_version(hba
, &hba_fw
);
761 hba_fw
.kern
= vpd
->postKernRev
;
762 hba_fw
.stub
= vpd
->opFwRev
;
763 hba_fw
.sli1
= vpd
->sli1FwRev
;
764 hba_fw
.sli2
= vpd
->sli2FwRev
;
765 hba_fw
.sli3
= vpd
->sli3FwRev
;
766 hba_fw
.sli4
= vpd
->sli4FwRev
;
770 ((fw
->kern
&& (hba_fw
.kern
!= fw
->kern
)) ||
771 (fw
->stub
&& (hba_fw
.stub
!= fw
->stub
)))) {
773 hba
->fw_flag
|= FW_UPDATE_NEEDED
;
775 } else if ((fw
->kern
&& (hba_fw
.kern
!= fw
->kern
)) ||
776 (fw
->stub
&& (hba_fw
.stub
!= fw
->stub
)) ||
777 (fw
->sli1
&& (hba_fw
.sli1
!= fw
->sli1
)) ||
778 (fw
->sli2
&& (hba_fw
.sli2
!= fw
->sli2
)) ||
779 (fw
->sli3
&& (hba_fw
.sli3
!= fw
->sli3
)) ||
780 (fw
->sli4
&& (hba_fw
.sli4
!= fw
->sli4
))) {
782 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_msg
,
783 "Firmware update needed. "
784 "Updating. id=%d fw=%d",
785 hba
->model_info
.id
, hba
->model_info
.fwid
);
789 * Load the firmware image now
790 * If MODFW_SUPPORT is not defined, the
791 * firmware image will already be defined
792 * in the emlxs_fw_table
794 emlxs_fw_load(hba
, fw
);
795 #endif /* MODFW_SUPPORT */
797 if (fw
->image
&& fw
->size
) {
800 rc
= emlxs_fw_download(hba
,
801 (char *)fw
->image
, fw
->size
, 0);
802 if ((rc
!= FC_SUCCESS
) &&
803 (rc
!= EMLXS_REBOOT_REQUIRED
)) {
804 EMLXS_MSGF(EMLXS_CONTEXT
,
806 "Firmware update failed.");
812 * Unload the firmware image from
815 emlxs_fw_unload(hba
, fw
);
816 #endif /* MODFW_SUPPORT */
823 hba
->fw_flag
|= FW_UPDATE_NEEDED
;
825 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_msg
,
826 "Firmware image unavailable.");
828 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_msg
,
829 "Firmware update not needed.");
833 * This means either the adapter database is not
834 * correct or a firmware image is missing from the
837 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_msg
,
838 "Firmware image unavailable. id=%d fw=%d",
839 hba
->model_info
.id
, hba
->model_info
.fwid
);
843 /* Reuse mbq from previous mbox */
844 bzero(mbq
, sizeof (MAILBOXQ
));
846 emlxs_mb_dump_fcoe(hba
, mbq
, 0);
848 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
850 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
851 "No FCOE info found. status=%x", mb
->mbxStatus
);
853 EMLXS_MSGF(EMLXS_CONTEXT
,
854 &emlxs_init_debug_msg
,
855 "FCOE info dumped. rsp_cnt=%d status=%x",
856 mb
->un
.varDmp4
.rsp_cnt
, mb
->mbxStatus
);
857 (void) emlxs_parse_fcoe(hba
,
858 (uint8_t *)hba
->sli
.sli4
.dump_region
.virt
,
859 mb
->un
.varDmp4
.rsp_cnt
);
862 /* Reuse mbq from previous mbox */
863 bzero(mbq
, sizeof (MAILBOXQ
));
866 if (port
->flag
& EMLXS_INI_ENABLED
) {
867 status
|= SLI4_FEATURE_FCP_INITIATOR
;
869 if (port
->flag
& EMLXS_TGT_ENABLED
) {
870 status
|= SLI4_FEATURE_FCP_TARGET
;
872 if (cfg
[CFG_NPIV_ENABLE
].current
) {
873 status
|= SLI4_FEATURE_NPIV
;
875 if (cfg
[CFG_RQD_MODE
].current
) {
876 status
|= SLI4_FEATURE_RQD
;
878 if (cfg
[CFG_PERF_HINT
].current
) {
879 if (hba
->sli
.sli4
.param
.PHON
) {
880 status
|= SLI4_FEATURE_PERF_HINT
;
884 emlxs_mb_request_features(hba
, mbq
, status
);
886 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
887 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
888 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
889 mb
->mbxCommand
, mb
->mbxStatus
);
894 emlxs_data_dump(port
, "REQ_FEATURE", (uint32_t *)mb
, 6, 0);
896 /* Check to see if we get the features we requested */
897 if (status
!= mb
->un
.varReqFeatures
.featuresEnabled
) {
899 /* Just report descrepencies, don't abort the attach */
901 outptr
= (uint8_t *)emlxs_request_feature_xlate(
902 mb
->un
.varReqFeatures
.featuresRequested
);
903 (void) strlcpy(buf
, (char *)outptr
, sizeof (buf
));
905 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
906 "REQUEST_FEATURES: wanted:%s got:%s",
907 &buf
[0], emlxs_request_feature_xlate(
908 mb
->un
.varReqFeatures
.featuresEnabled
));
912 if ((port
->flag
& EMLXS_INI_ENABLED
) &&
913 !(mb
->un
.varReqFeatures
.featuresEnabled
&
914 SLI4_FEATURE_FCP_INITIATOR
)) {
915 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
916 "Initiator mode not supported by adapter.");
921 /* Check if we can fall back to just target mode */
922 if ((hba
->pm_state
== EMLXS_PM_IN_ATTACH
) &&
923 (mb
->un
.varReqFeatures
.featuresEnabled
&
924 SLI4_FEATURE_FCP_TARGET
) &&
925 (cfg
[CFG_DTM_ENABLE
].current
== 1) &&
926 (cfg
[CFG_TARGET_MODE
].current
== 1)) {
928 cfg
[CFG_DTM_ENABLE
].current
= 0;
930 EMLXS_MSGF(EMLXS_CONTEXT
,
931 &emlxs_init_failed_msg
,
932 "Disabling dynamic target mode. "
933 "Enabling target mode only.");
935 /* This will trigger the driver to reattach */
938 #endif /* SFCT_SUPPORT */
942 if ((port
->flag
& EMLXS_TGT_ENABLED
) &&
943 !(mb
->un
.varReqFeatures
.featuresEnabled
&
944 SLI4_FEATURE_FCP_TARGET
)) {
945 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
946 "Target mode not supported by adapter.");
951 /* Check if we can fall back to just initiator mode */
952 if ((hba
->pm_state
== EMLXS_PM_IN_ATTACH
) &&
953 (mb
->un
.varReqFeatures
.featuresEnabled
&
954 SLI4_FEATURE_FCP_INITIATOR
) &&
955 (cfg
[CFG_DTM_ENABLE
].current
== 1) &&
956 (cfg
[CFG_TARGET_MODE
].current
== 0)) {
958 cfg
[CFG_DTM_ENABLE
].current
= 0;
960 EMLXS_MSGF(EMLXS_CONTEXT
,
961 &emlxs_init_failed_msg
,
962 "Disabling dynamic target mode. "
963 "Enabling initiator mode only.");
965 /* This will trigger the driver to reattach */
968 #endif /* SFCT_SUPPORT */
972 if (mb
->un
.varReqFeatures
.featuresEnabled
& SLI4_FEATURE_NPIV
) {
973 hba
->flag
|= FC_NPIV_ENABLED
;
976 if (mb
->un
.varReqFeatures
.featuresEnabled
& SLI4_FEATURE_PERF_HINT
) {
977 hba
->sli
.sli4
.flag
|= EMLXS_SLI4_PHON
;
978 if (hba
->sli
.sli4
.param
.PHWQ
) {
979 hba
->sli
.sli4
.flag
|= EMLXS_SLI4_PHWQ
;
983 /* Reuse mbq from previous mbox */
984 bzero(mbq
, sizeof (MAILBOXQ
));
986 emlxs_mb_read_config(hba
, mbq
);
987 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
988 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
989 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
990 mb
->mbxCommand
, mb
->mbxStatus
);
995 emlxs_data_dump(port
, "READ_CONFIG4", (uint32_t *)mb
, 18, 0);
997 /* Set default extents */
998 hba
->sli
.sli4
.XRICount
= mb
->un
.varRdConfig4
.XRICount
;
999 hba
->sli
.sli4
.XRIExtCount
= 1;
1000 hba
->sli
.sli4
.XRIExtSize
= hba
->sli
.sli4
.XRICount
;
1001 hba
->sli
.sli4
.XRIBase
[0] = mb
->un
.varRdConfig4
.XRIBase
;
1003 hba
->sli
.sli4
.RPICount
= mb
->un
.varRdConfig4
.RPICount
;
1004 hba
->sli
.sli4
.RPIExtCount
= 1;
1005 hba
->sli
.sli4
.RPIExtSize
= hba
->sli
.sli4
.RPICount
;
1006 hba
->sli
.sli4
.RPIBase
[0] = mb
->un
.varRdConfig4
.RPIBase
;
1008 hba
->sli
.sli4
.VPICount
= mb
->un
.varRdConfig4
.VPICount
;
1009 hba
->sli
.sli4
.VPIExtCount
= 1;
1010 hba
->sli
.sli4
.VPIExtSize
= hba
->sli
.sli4
.VPICount
;
1011 hba
->sli
.sli4
.VPIBase
[0] = mb
->un
.varRdConfig4
.VPIBase
;
1013 hba
->sli
.sli4
.VFICount
= mb
->un
.varRdConfig4
.VFICount
;
1014 hba
->sli
.sli4
.VFIExtCount
= 1;
1015 hba
->sli
.sli4
.VFIExtSize
= hba
->sli
.sli4
.VFICount
;
1016 hba
->sli
.sli4
.VFIBase
[0] = mb
->un
.varRdConfig4
.VFIBase
;
1018 hba
->sli
.sli4
.FCFICount
= mb
->un
.varRdConfig4
.FCFICount
;
1020 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
1021 "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1022 hba
->sli
.sli4
.XRICount
,
1023 hba
->sli
.sli4
.RPICount
,
1024 hba
->sli
.sli4
.VPICount
,
1025 hba
->sli
.sli4
.VFICount
,
1026 hba
->sli
.sli4
.FCFICount
);
1028 if ((hba
->sli
.sli4
.XRICount
== 0) ||
1029 (hba
->sli
.sli4
.RPICount
== 0) ||
1030 (hba
->sli
.sli4
.VPICount
== 0) ||
1031 (hba
->sli
.sli4
.VFICount
== 0) ||
1032 (hba
->sli
.sli4
.FCFICount
== 0)) {
1033 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1034 "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1036 hba
->sli
.sli4
.XRICount
,
1037 hba
->sli
.sli4
.RPICount
,
1038 hba
->sli
.sli4
.VPICount
,
1039 hba
->sli
.sli4
.VFICount
,
1040 hba
->sli
.sli4
.FCFICount
);
1046 if (mb
->un
.varRdConfig4
.extents
) {
1047 if (emlxs_sli4_init_extents(hba
, mbq
)) {
1048 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1049 "Unable to initialize extents.");
1056 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
1057 "CONFIG: port_name:%c %c %c %c",
1058 hba
->sli
.sli4
.port_name
[0],
1059 hba
->sli
.sli4
.port_name
[1],
1060 hba
->sli
.sli4
.port_name
[2],
1061 hba
->sli
.sli4
.port_name
[3]);
1063 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
1064 "CONFIG: ldv:%d link_type:%d link_number:%d",
1065 mb
->un
.varRdConfig4
.ldv
,
1066 mb
->un
.varRdConfig4
.link_type
,
1067 mb
->un
.varRdConfig4
.link_number
);
1069 if (mb
->un
.varRdConfig4
.ldv
) {
1070 hba
->sli
.sli4
.link_number
= mb
->un
.varRdConfig4
.link_number
;
1072 hba
->sli
.sli4
.link_number
= (uint32_t)-1;
1075 if (hba
->sli
.sli4
.VPICount
) {
1076 hba
->vpi_max
= min(hba
->sli
.sli4
.VPICount
, MAX_VPORTS
) - 1;
1079 /* Set the max node count */
1080 if (cfg
[CFG_NUM_NODES
].current
> 0) {
1082 min(cfg
[CFG_NUM_NODES
].current
,
1083 hba
->sli
.sli4
.RPICount
);
1085 hba
->max_nodes
= hba
->sli
.sli4
.RPICount
;
1088 /* Set the io throttle */
1089 hba
->io_throttle
= hba
->sli
.sli4
.XRICount
- IO_THROTTLE_RESERVE
;
1092 /* We add 1 in case all XRI's are non-zero */
1093 hba
->max_iotag
= hba
->sli
.sli4
.XRICount
+ 1;
1095 if (cfg
[CFG_NUM_IOTAGS
].current
) {
1096 hba
->max_iotag
= min(hba
->max_iotag
,
1097 (uint16_t)cfg
[CFG_NUM_IOTAGS
].current
);
1100 /* Set out-of-range iotag base */
1101 hba
->fc_oor_iotag
= hba
->max_iotag
;
1103 /* Save the link speed capabilities */
1104 vpd
->link_speed
= (uint16_t)mb
->un
.varRdConfig4
.lmt
;
1105 emlxs_process_link_speed(hba
);
1108 * Allocate some memory for buffers
1110 if (emlxs_mem_alloc_buffer(hba
) == 0) {
1111 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1112 "Unable to allocate memory buffers.");
1118 if (emlxs_sli4_resource_alloc(hba
)) {
1119 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1120 "Unable to allocate resources.");
1125 emlxs_data_dump(port
, "XRIp", (uint32_t *)hba
->sli
.sli4
.XRIp
, 18, 0);
1126 emlxs_sli4_zero_queue_stat(hba
);
1128 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1129 if ((cfg
[CFG_NPIV_ENABLE
].current
) && (hba
->flag
& FC_NPIV_ENABLED
)) {
1130 hba
->fca_tran
->fca_num_npivports
= hba
->vpi_max
;
1132 #endif /* >= EMLXS_MODREV5 */
1134 /* Reuse mbq from previous mbox */
1135 bzero(mbq
, sizeof (MAILBOXQ
));
1137 if (emlxs_sli4_post_sgl_pages(hba
, mbq
)) {
1138 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1139 "Unable to post sgl pages.");
1145 /* Reuse mbq from previous mbox */
1146 bzero(mbq
, sizeof (MAILBOXQ
));
1148 if (emlxs_sli4_post_hdr_tmplates(hba
, mbq
)) {
1149 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1150 "Unable to post header templates.");
1157 * Add our interrupt routine to kernel's interrupt chain & enable it
1158 * If MSI is enabled this will cause Solaris to program the MSI address
1159 * and data registers in PCI config space
1161 if (EMLXS_INTR_ADD(hba
) != DDI_SUCCESS
) {
1162 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1163 "Unable to add interrupt(s).");
1169 /* Reuse mbq from previous mbox */
1170 bzero(mbq
, sizeof (MAILBOXQ
));
1172 /* This MUST be done after EMLXS_INTR_ADD */
1173 if (emlxs_sli4_create_queues(hba
, mbq
)) {
1174 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1175 "Unable to create queues.");
1181 EMLXS_STATE_CHANGE(hba
, FC_INIT_CFGPORT
);
1183 /* Get and save the current firmware version (based on sli_mode) */
1184 emlxs_decode_firmware_rev(hba
, vpd
);
1187 EMLXS_STATE_CHANGE(hba
, FC_INIT_INITLINK
);
1190 /* Reuse mbq from previous mbox */
1191 bzero(mbq
, sizeof (MAILBOXQ
));
1193 emlxs_mb_config_link(hba
, mbq
);
1194 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
1196 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1197 "Unable to configure link. Mailbox cmd=%x "
1199 mb
->mbxCommand
, mb
->mbxStatus
);
1206 /* Reuse mbq from previous mbox */
1207 bzero(mbq
, sizeof (MAILBOXQ
));
1210 * We need to get login parameters for NID
1212 (void) emlxs_mb_read_sparam(hba
, mbq
);
1213 mp
= (MATCHMAP
*)mbq
->bp
;
1214 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
1215 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1216 "Unable to read parameters. Mailbox cmd=%x status=%x",
1217 mb
->mbxCommand
, mb
->mbxStatus
);
1223 /* Free the buffer since we were polling */
1224 emlxs_mem_put(hba
, MEM_BUF
, (void *)mp
);
1227 /* If no serial number in VPD data, then use the WWPN */
1228 if (vpd
->serial_num
[0] == 0) {
1229 outptr
= (uint8_t *)&hba
->wwpn
.IEEE
[0];
1230 for (i
= 0; i
< 12; i
++) {
1232 j
= ((status
& 0xf0) >> 4);
1234 vpd
->serial_num
[i
] =
1235 (char)((uint8_t)'0' + (uint8_t)j
);
1237 vpd
->serial_num
[i
] =
1238 (char)((uint8_t)'A' + (uint8_t)(j
- 10));
1244 vpd
->serial_num
[i
] =
1245 (char)((uint8_t)'0' + (uint8_t)j
);
1247 vpd
->serial_num
[i
] =
1248 (char)((uint8_t)'A' + (uint8_t)(j
- 10));
1253 * Set port number and port index to zero
1254 * The WWN's are unique to each port and therefore port_num
1255 * must equal zero. This effects the hba_fru_details structure
1256 * in fca_bind_port()
1258 vpd
->port_num
[0] = 0;
1259 vpd
->port_index
= 0;
1261 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
1262 "CONFIG: WWPN: port_index=0");
1265 /* Make final attempt to set a port index */
1266 if (vpd
->port_index
== (uint32_t)-1) {
1270 p_dip
= ddi_get_parent(hba
->dip
);
1271 c_dip
= ddi_get_child(p_dip
);
1273 vpd
->port_index
= 0;
1274 while (c_dip
&& (hba
->dip
!= c_dip
)) {
1275 c_dip
= ddi_get_next_sibling(c_dip
);
1277 if (strcmp(ddi_get_name(c_dip
), "ethernet") == 0) {
1284 EMLXS_MSGF(EMLXS_CONTEXT
,
1285 &emlxs_init_debug_msg
,
1286 "CONFIG: Device tree: port_index=%d",
1290 if (vpd
->port_num
[0] == 0) {
1291 if (hba
->model_info
.channels
== EMLXS_MULTI_CHANNEL
) {
1292 (void) snprintf(vpd
->port_num
,
1293 (sizeof (vpd
->port_num
)-1),
1294 "%d", vpd
->port_index
);
1298 if (vpd
->id
[0] == 0) {
1299 (void) snprintf(vpd
->id
, (sizeof (vpd
->id
)-1),
1301 hba
->model_info
.model_desc
, vpd
->port_index
);
1305 if (vpd
->manufacturer
[0] == 0) {
1306 (void) strncpy(vpd
->manufacturer
, hba
->model_info
.manufacturer
,
1307 (sizeof (vpd
->manufacturer
)-1));
1310 if (vpd
->part_num
[0] == 0) {
1311 (void) strncpy(vpd
->part_num
, hba
->model_info
.model
,
1312 (sizeof (vpd
->part_num
)-1));
1315 if (vpd
->model_desc
[0] == 0) {
1316 (void) snprintf(vpd
->model_desc
, (sizeof (vpd
->model_desc
)-1),
1318 hba
->model_info
.model_desc
, vpd
->port_index
);
1321 if (vpd
->model
[0] == 0) {
1322 (void) strncpy(vpd
->model
, hba
->model_info
.model
,
1323 (sizeof (vpd
->model
)-1));
1326 if (vpd
->prog_types
[0] == 0) {
1327 emlxs_build_prog_types(hba
, vpd
);
1330 /* Create the symbolic names */
1331 (void) snprintf(hba
->snn
, (sizeof (hba
->snn
)-1),
1332 "Emulex %s FV%s DV%s %s",
1333 hba
->model_info
.model
, hba
->vpd
.fw_version
, emlxs_version
,
1334 (char *)utsname
.nodename
);
1336 (void) snprintf(hba
->spn
, (sizeof (hba
->spn
)-1),
1337 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1338 hba
->wwpn
.nameType
, hba
->wwpn
.IEEEextMsn
, hba
->wwpn
.IEEEextLsb
,
1339 hba
->wwpn
.IEEE
[0], hba
->wwpn
.IEEE
[1], hba
->wwpn
.IEEE
[2],
1340 hba
->wwpn
.IEEE
[3], hba
->wwpn
.IEEE
[4], hba
->wwpn
.IEEE
[5]);
1343 EMLXS_STATE_CHANGE(hba
, FC_LINK_DOWN
);
1344 emlxs_sli4_enable_intr(hba
);
1346 /* Check persist-linkdown */
1347 if (cfg
[CFG_PERSIST_LINKDOWN
].current
) {
1348 EMLXS_STATE_CHANGE(hba
, FC_LINK_DOWN_PERSIST
);
1353 if ((port
->mode
== MODE_TARGET
) &&
1354 !(port
->fct_flags
& FCT_STATE_PORT_ONLINE
)) {
1357 #endif /* SFCT_SUPPORT */
1359 /* Reuse mbq from previous mbox */
1360 bzero(mbq
, sizeof (MAILBOXQ
));
1363 * Setup and issue mailbox INITIALIZE LINK command
1364 * At this point, the interrupt will be generated by the HW
1366 emlxs_mb_init_link(hba
, mbq
,
1367 cfg
[CFG_TOPOLOGY
].current
, cfg
[CFG_LINK_SPEED
].current
);
1369 rval
= emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_NOWAIT
, 0);
1370 if ((rval
!= MBX_SUCCESS
) && (rval
!= MBX_BUSY
)) {
1371 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
1372 "Unable to initialize link. "
1373 "Mailbox cmd=%x status=%x",
1374 mb
->mbxCommand
, mb
->mbxStatus
);
1380 /* Wait for link to come up */
1381 i
= cfg
[CFG_LINKUP_DELAY
].current
;
1382 while (i
&& (hba
->state
< FC_LINK_UP
)) {
1383 /* Check for hardware error */
1384 if (hba
->state
== FC_ERROR
) {
1385 EMLXS_MSGF(EMLXS_CONTEXT
,
1386 &emlxs_init_failed_msg
,
1387 "Adapter error.", mb
->mbxCommand
,
1400 * The leadville driver will now handle the FLOGI at the driver level
1404 (void) kmem_free((uint8_t *)mbq
, sizeof (MAILBOXQ
));
1411 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1414 emlxs_mem_put(hba
, MEM_BUF
, (void *)mp
);
1419 if (hba
->intr_flags
& EMLXS_MSI_ADDED
) {
1420 (void) EMLXS_INTR_REMOVE(hba
);
1423 emlxs_sli4_resource_free(hba
);
1426 (void) emlxs_mem_free_buffer(hba
);
1430 (void) kmem_free((uint8_t *)mbq
, sizeof (MAILBOXQ
));
1435 if (hba
->sli
.sli4
.dump_region
.virt
) {
1436 (void) emlxs_mem_free(hba
, &hba
->sli
.sli4
.dump_region
);
1445 } /* emlxs_sli4_online() */
1449 emlxs_sli4_offline(emlxs_hba_t
*hba
, uint32_t reset_requested
)
1451 /* Reverse emlxs_sli4_online */
1453 mutex_enter(&EMLXS_PORT_LOCK
);
1454 if (hba
->flag
& FC_INTERLOCKED
) {
1455 mutex_exit(&EMLXS_PORT_LOCK
);
1458 mutex_exit(&EMLXS_PORT_LOCK
);
1460 if (reset_requested
) {
1461 (void) emlxs_sli4_hba_reset(hba
, 0, 0, 0);
1464 /* Shutdown the adapter interface */
1465 emlxs_sli4_hba_kill(hba
);
1469 /* Free SLI shared memory */
1470 emlxs_sli4_resource_free(hba
);
1472 /* Free driver shared memory */
1473 (void) emlxs_mem_free_buffer(hba
);
1475 /* Free the host dump region buffer */
1476 (void) emlxs_mem_free(hba
, &hba
->sli
.sli4
.dump_region
);
1478 } /* emlxs_sli4_offline() */
1483 emlxs_sli4_map_hdw(emlxs_hba_t
*hba
)
1485 emlxs_port_t
*port
= &PPORT
;
1487 ddi_device_acc_attr_t dev_attr
;
1490 dip
= (dev_info_t
*)hba
->dip
;
1491 dev_attr
= emlxs_dev_acc_attr
;
1493 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1494 case SLI_INTF_IF_TYPE_0
:
1496 /* Map in Hardware BAR pages that will be used for */
1497 /* communication with HBA. */
1498 if (hba
->sli
.sli4
.bar1_acc_handle
== 0) {
1499 status
= ddi_regs_map_setup(dip
, PCI_BAR1_RINDEX
,
1500 (caddr_t
*)&hba
->sli
.sli4
.bar1_addr
,
1501 0, 0, &dev_attr
, &hba
->sli
.sli4
.bar1_acc_handle
);
1502 if (status
!= DDI_SUCCESS
) {
1503 EMLXS_MSGF(EMLXS_CONTEXT
,
1504 &emlxs_attach_failed_msg
,
1505 "(PCI) ddi_regs_map_setup BAR1 failed. "
1506 "stat=%d mem=%p attr=%p hdl=%p",
1507 status
, &hba
->sli
.sli4
.bar1_addr
, &dev_attr
,
1508 &hba
->sli
.sli4
.bar1_acc_handle
);
1513 if (hba
->sli
.sli4
.bar2_acc_handle
== 0) {
1514 status
= ddi_regs_map_setup(dip
, PCI_BAR2_RINDEX
,
1515 (caddr_t
*)&hba
->sli
.sli4
.bar2_addr
,
1516 0, 0, &dev_attr
, &hba
->sli
.sli4
.bar2_acc_handle
);
1517 if (status
!= DDI_SUCCESS
) {
1518 EMLXS_MSGF(EMLXS_CONTEXT
,
1519 &emlxs_attach_failed_msg
,
1520 "ddi_regs_map_setup BAR2 failed. status=%x",
1526 /* offset from beginning of register space */
1527 hba
->sli
.sli4
.MPUEPSemaphore_reg_addr
=
1528 (uint32_t *)(hba
->sli
.sli4
.bar1_addr
+
1529 CSR_MPU_EP_SEMAPHORE_OFFSET
);
1530 hba
->sli
.sli4
.MBDB_reg_addr
=
1531 (uint32_t *)(hba
->sli
.sli4
.bar2_addr
+ PD_MB_DB_OFFSET
);
1532 hba
->sli
.sli4
.CQDB_reg_addr
=
1533 (uint32_t *)(hba
->sli
.sli4
.bar2_addr
+ PD_CQ_DB_OFFSET
);
1534 hba
->sli
.sli4
.MQDB_reg_addr
=
1535 (uint32_t *)(hba
->sli
.sli4
.bar2_addr
+ PD_MQ_DB_OFFSET
);
1536 hba
->sli
.sli4
.WQDB_reg_addr
=
1537 (uint32_t *)(hba
->sli
.sli4
.bar2_addr
+ PD_WQ_DB_OFFSET
);
1538 hba
->sli
.sli4
.RQDB_reg_addr
=
1539 (uint32_t *)(hba
->sli
.sli4
.bar2_addr
+ PD_RQ_DB_OFFSET
);
1541 hba
->sli
.sli4
.STATUS_reg_addr
= 0;
1542 hba
->sli
.sli4
.CNTL_reg_addr
= 0;
1544 hba
->sli
.sli4
.ERR1_reg_addr
=
1545 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_STATUS_LO_OFFSET
);
1546 hba
->sli
.sli4
.ERR2_reg_addr
=
1547 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_STATUS_HI_OFFSET
);
1549 hba
->sli
.sli4
.PHYSDEV_reg_addr
= 0;
1552 case SLI_INTF_IF_TYPE_2
:
1554 /* Map in Hardware BAR pages that will be used for */
1555 /* communication with HBA. */
1556 if (hba
->sli
.sli4
.bar0_acc_handle
== 0) {
1557 status
= ddi_regs_map_setup(dip
, PCI_BAR0_RINDEX
,
1558 (caddr_t
*)&hba
->sli
.sli4
.bar0_addr
,
1559 0, 0, &dev_attr
, &hba
->sli
.sli4
.bar0_acc_handle
);
1560 if (status
!= DDI_SUCCESS
) {
1561 EMLXS_MSGF(EMLXS_CONTEXT
,
1562 &emlxs_attach_failed_msg
,
1563 "(PCI) ddi_regs_map_setup BAR0 failed. "
1564 "stat=%d mem=%p attr=%p hdl=%p",
1565 status
, &hba
->sli
.sli4
.bar0_addr
, &dev_attr
,
1566 &hba
->sli
.sli4
.bar0_acc_handle
);
1571 /* offset from beginning of register space */
1572 hba
->sli
.sli4
.MPUEPSemaphore_reg_addr
=
1573 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1574 SLIPORT_SEMAPHORE_OFFSET
);
1575 hba
->sli
.sli4
.MBDB_reg_addr
=
1576 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+ PD_MB_DB_OFFSET
);
1577 hba
->sli
.sli4
.CQDB_reg_addr
=
1578 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+ PD_CQ_DB_OFFSET
);
1579 hba
->sli
.sli4
.MQDB_reg_addr
=
1580 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+ PD_MQ_DB_OFFSET
);
1581 hba
->sli
.sli4
.WQDB_reg_addr
=
1582 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+ PD_WQ_DB_OFFSET
);
1583 hba
->sli
.sli4
.RQDB_reg_addr
=
1584 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+ PD_RQ_DB_OFFSET
);
1586 hba
->sli
.sli4
.STATUS_reg_addr
=
1587 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1588 SLIPORT_STATUS_OFFSET
);
1589 hba
->sli
.sli4
.CNTL_reg_addr
=
1590 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1591 SLIPORT_CONTROL_OFFSET
);
1592 hba
->sli
.sli4
.ERR1_reg_addr
=
1593 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1594 SLIPORT_ERROR1_OFFSET
);
1595 hba
->sli
.sli4
.ERR2_reg_addr
=
1596 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1597 SLIPORT_ERROR2_OFFSET
);
1598 hba
->sli
.sli4
.PHYSDEV_reg_addr
=
1599 (uint32_t *)(hba
->sli
.sli4
.bar0_addr
+
1600 PHYSDEV_CONTROL_OFFSET
);
1604 case SLI_INTF_IF_TYPE_1
:
1605 case SLI_INTF_IF_TYPE_3
:
1607 EMLXS_MSGF(EMLXS_CONTEXT
,
1608 &emlxs_attach_failed_msg
,
1609 "Map hdw: Unsupported if_type %08x",
1610 (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
));
1615 if (hba
->sli
.sli4
.bootstrapmb
.virt
== 0) {
1616 MBUF_INFO
*buf_info
;
1619 buf_info
= &bufinfo
;
1621 bzero(buf_info
, sizeof (MBUF_INFO
));
1622 buf_info
->size
= EMLXS_BOOTSTRAP_MB_SIZE
+ MBOX_EXTENSION_SIZE
;
1624 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
1625 buf_info
->align
= ddi_ptob(dip
, 1L);
1627 (void) emlxs_mem_alloc(hba
, buf_info
);
1629 if (buf_info
->virt
== NULL
) {
1633 hba
->sli
.sli4
.bootstrapmb
.virt
= buf_info
->virt
;
1634 hba
->sli
.sli4
.bootstrapmb
.phys
= buf_info
->phys
;
1635 hba
->sli
.sli4
.bootstrapmb
.size
= EMLXS_BOOTSTRAP_MB_SIZE
+
1636 MBOX_EXTENSION_SIZE
;
1637 hba
->sli
.sli4
.bootstrapmb
.data_handle
= buf_info
->data_handle
;
1638 hba
->sli
.sli4
.bootstrapmb
.dma_handle
= buf_info
->dma_handle
;
1639 bzero((char *)hba
->sli
.sli4
.bootstrapmb
.virt
,
1640 EMLXS_BOOTSTRAP_MB_SIZE
);
1643 hba
->chan_count
= MAX_CHANNEL
;
1649 emlxs_sli4_unmap_hdw(hba
);
1653 } /* emlxs_sli4_map_hdw() */
1658 emlxs_sli4_unmap_hdw(emlxs_hba_t
*hba
)
1661 MBUF_INFO
*buf_info
= &bufinfo
;
1664 if (hba
->sli
.sli4
.bar0_acc_handle
) {
1665 ddi_regs_map_free(&hba
->sli
.sli4
.bar0_acc_handle
);
1666 hba
->sli
.sli4
.bar0_acc_handle
= 0;
1669 if (hba
->sli
.sli4
.bar1_acc_handle
) {
1670 ddi_regs_map_free(&hba
->sli
.sli4
.bar1_acc_handle
);
1671 hba
->sli
.sli4
.bar1_acc_handle
= 0;
1674 if (hba
->sli
.sli4
.bar2_acc_handle
) {
1675 ddi_regs_map_free(&hba
->sli
.sli4
.bar2_acc_handle
);
1676 hba
->sli
.sli4
.bar2_acc_handle
= 0;
1679 if (hba
->sli
.sli4
.bootstrapmb
.virt
) {
1680 bzero(buf_info
, sizeof (MBUF_INFO
));
1682 if (hba
->sli
.sli4
.bootstrapmb
.phys
) {
1683 buf_info
->phys
= hba
->sli
.sli4
.bootstrapmb
.phys
;
1684 buf_info
->data_handle
=
1685 hba
->sli
.sli4
.bootstrapmb
.data_handle
;
1686 buf_info
->dma_handle
=
1687 hba
->sli
.sli4
.bootstrapmb
.dma_handle
;
1688 buf_info
->flags
= FC_MBUF_DMA
;
1691 buf_info
->virt
= hba
->sli
.sli4
.bootstrapmb
.virt
;
1692 buf_info
->size
= hba
->sli
.sli4
.bootstrapmb
.size
;
1693 emlxs_mem_free(hba
, buf_info
);
1695 hba
->sli
.sli4
.bootstrapmb
.virt
= NULL
;
1700 } /* emlxs_sli4_unmap_hdw() */
1704 emlxs_check_hdw_ready(emlxs_hba_t
*hba
)
1706 emlxs_port_t
*port
= &PPORT
;
1712 /* Wait for reset completion */
1715 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1716 case SLI_INTF_IF_TYPE_0
:
1717 status
= emlxs_sli4_read_sema(hba
);
1719 /* Check to see if any errors occurred during init */
1720 if (status
& ARM_POST_FATAL
) {
1721 EMLXS_MSGF(EMLXS_CONTEXT
,
1722 &emlxs_reset_failed_msg
,
1723 "SEMA Error: status=%x", status
);
1725 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1730 if ((status
& ARM_UNRECOVERABLE_ERROR
) ==
1731 ARM_UNRECOVERABLE_ERROR
) {
1732 EMLXS_MSGF(EMLXS_CONTEXT
,
1733 &emlxs_reset_failed_msg
,
1734 "Unrecoverable Error: status=%x", status
);
1736 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1741 if ((status
& ARM_POST_MASK
) == ARM_POST_READY
) {
1743 EMLXS_MSGF(EMLXS_CONTEXT
,
1744 &emlxs_sli_detail_msg
,
1745 "ARM Ready: status=%x", status
);
1751 case SLI_INTF_IF_TYPE_2
:
1752 status
= emlxs_sli4_read_status(hba
);
1754 if (status
& SLI_STATUS_READY
) {
1755 if (!(status
& SLI_STATUS_ERROR
)) {
1757 EMLXS_MSGF(EMLXS_CONTEXT
,
1758 &emlxs_sli_detail_msg
,
1759 "ARM Ready: status=%x", status
);
1764 err1
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1765 hba
->sli
.sli4
.ERR1_reg_addr
);
1766 err2
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1767 hba
->sli
.sli4
.ERR2_reg_addr
);
1769 if (status
& SLI_STATUS_RESET_NEEDED
) {
1770 EMLXS_MSGF(EMLXS_CONTEXT
,
1771 &emlxs_sli_detail_msg
,
1772 "ARM Ready (Reset Needed): "
1773 "status=%x err1=%x "
1775 status
, err1
, err2
);
1780 EMLXS_MSGF(EMLXS_CONTEXT
,
1781 &emlxs_reset_failed_msg
,
1782 "Unrecoverable Error: status=%x err1=%x "
1784 status
, err1
, err2
);
1786 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1794 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1803 /* Timeout occurred */
1804 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1805 case SLI_INTF_IF_TYPE_0
:
1806 err1
= ddi_get32(hba
->pci_acc_handle
,
1807 hba
->sli
.sli4
.ERR1_reg_addr
);
1808 err2
= ddi_get32(hba
->pci_acc_handle
,
1809 hba
->sli
.sli4
.ERR2_reg_addr
);
1813 err1
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1814 hba
->sli
.sli4
.ERR1_reg_addr
);
1815 err2
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1816 hba
->sli
.sli4
.ERR2_reg_addr
);
1820 if (status
& SLI_STATUS_ERROR
) {
1821 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_reset_failed_msg
,
1822 "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
1823 status
, err1
, err2
);
1825 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_reset_failed_msg
,
1826 "Ready Timeout: status=%x err1=%x err2=%x",
1827 status
, err1
, err2
);
1830 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
1834 } /* emlxs_check_hdw_ready() */
1838 emlxs_sli4_read_status(emlxs_hba_t
*hba
)
1841 emlxs_port_t
*port
= &PPORT
;
1842 #endif /* FMA_SUPPORT */
1845 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1846 case SLI_INTF_IF_TYPE_2
:
1847 status
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1848 hba
->sli
.sli4
.STATUS_reg_addr
);
1850 /* Access handle validation */
1851 EMLXS_CHK_ACC_HANDLE(hba
, hba
->sli
.sli4
.bar0_acc_handle
);
1852 #endif /* FMA_SUPPORT */
1861 } /* emlxs_sli4_read_status() */
1865 emlxs_sli4_read_sema(emlxs_hba_t
*hba
)
1868 emlxs_port_t
*port
= &PPORT
;
1869 #endif /* FMA_SUPPORT */
1872 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1873 case SLI_INTF_IF_TYPE_0
:
1874 status
= ddi_get32(hba
->sli
.sli4
.bar1_acc_handle
,
1875 hba
->sli
.sli4
.MPUEPSemaphore_reg_addr
);
1877 /* Access handle validation */
1878 EMLXS_CHK_ACC_HANDLE(hba
, hba
->sli
.sli4
.bar1_acc_handle
);
1879 #endif /* FMA_SUPPORT */
1882 case SLI_INTF_IF_TYPE_2
:
1883 status
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1884 hba
->sli
.sli4
.MPUEPSemaphore_reg_addr
);
1886 /* Access handle validation */
1887 EMLXS_CHK_ACC_HANDLE(hba
, hba
->sli
.sli4
.bar0_acc_handle
);
1888 #endif /* FMA_SUPPORT */
1897 } /* emlxs_sli4_read_sema() */
1901 emlxs_sli4_read_mbdb(emlxs_hba_t
*hba
)
1904 emlxs_port_t
*port
= &PPORT
;
1905 #endif /* FMA_SUPPORT */
1908 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1909 case SLI_INTF_IF_TYPE_0
:
1910 status
= ddi_get32(hba
->sli
.sli4
.bar2_acc_handle
,
1911 hba
->sli
.sli4
.MBDB_reg_addr
);
1914 /* Access handle validation */
1915 EMLXS_CHK_ACC_HANDLE(hba
, hba
->sli
.sli4
.bar2_acc_handle
);
1916 #endif /* FMA_SUPPORT */
1919 case SLI_INTF_IF_TYPE_2
:
1920 status
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
1921 hba
->sli
.sli4
.MBDB_reg_addr
);
1923 /* Access handle validation */
1924 EMLXS_CHK_ACC_HANDLE(hba
, hba
->sli
.sli4
.bar0_acc_handle
);
1925 #endif /* FMA_SUPPORT */
1934 } /* emlxs_sli4_read_mbdb() */
1938 emlxs_sli4_write_mbdb(emlxs_hba_t
*hba
, uint32_t value
)
1940 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1941 case SLI_INTF_IF_TYPE_0
:
1942 ddi_put32(hba
->sli
.sli4
.bar2_acc_handle
,
1943 hba
->sli
.sli4
.MBDB_reg_addr
, value
);
1946 case SLI_INTF_IF_TYPE_2
:
1947 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
1948 hba
->sli
.sli4
.MBDB_reg_addr
, value
);
1952 } /* emlxs_sli4_write_mbdb() */
1956 emlxs_sli4_write_cqdb(emlxs_hba_t
*hba
, uint32_t value
)
1958 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1959 case SLI_INTF_IF_TYPE_0
:
1960 ddi_put32(hba
->sli
.sli4
.bar2_acc_handle
,
1961 hba
->sli
.sli4
.CQDB_reg_addr
, value
);
1964 case SLI_INTF_IF_TYPE_2
:
1965 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
1966 hba
->sli
.sli4
.CQDB_reg_addr
, value
);
1970 } /* emlxs_sli4_write_cqdb() */
1974 emlxs_sli4_write_rqdb(emlxs_hba_t
*hba
, uint32_t value
)
1976 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1977 case SLI_INTF_IF_TYPE_0
:
1978 ddi_put32(hba
->sli
.sli4
.bar2_acc_handle
,
1979 hba
->sli
.sli4
.RQDB_reg_addr
, value
);
1982 case SLI_INTF_IF_TYPE_2
:
1983 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
1984 hba
->sli
.sli4
.RQDB_reg_addr
, value
);
1988 } /* emlxs_sli4_write_rqdb() */
1992 emlxs_sli4_write_mqdb(emlxs_hba_t
*hba
, uint32_t value
)
1994 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
1995 case SLI_INTF_IF_TYPE_0
:
1996 ddi_put32(hba
->sli
.sli4
.bar2_acc_handle
,
1997 hba
->sli
.sli4
.MQDB_reg_addr
, value
);
2000 case SLI_INTF_IF_TYPE_2
:
2001 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
2002 hba
->sli
.sli4
.MQDB_reg_addr
, value
);
2006 } /* emlxs_sli4_write_mqdb() */
2010 emlxs_sli4_write_wqdb(emlxs_hba_t
*hba
, uint32_t value
)
2012 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
2013 case SLI_INTF_IF_TYPE_0
:
2014 ddi_put32(hba
->sli
.sli4
.bar2_acc_handle
,
2015 hba
->sli
.sli4
.WQDB_reg_addr
, value
);
2018 case SLI_INTF_IF_TYPE_2
:
2019 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
2020 hba
->sli
.sli4
.WQDB_reg_addr
, value
);
2024 } /* emlxs_sli4_write_wqdb() */
2028 emlxs_check_bootstrap_ready(emlxs_hba_t
*hba
, uint32_t tmo
)
2030 emlxs_port_t
*port
= &PPORT
;
2031 uint32_t status
= 0;
2035 /* Wait for reset completion, tmo is in 10ms ticks */
2037 status
= emlxs_sli4_read_mbdb(hba
);
2039 /* Check to see if any errors occurred during init */
2040 if (status
& BMBX_READY
) {
2041 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
2042 "BMBX Ready: status=0x%x", status
);
2051 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
2052 case SLI_INTF_IF_TYPE_0
:
2053 err1
= ddi_get32(hba
->pci_acc_handle
,
2054 hba
->sli
.sli4
.ERR1_reg_addr
);
2055 err2
= ddi_get32(hba
->pci_acc_handle
,
2056 hba
->sli
.sli4
.ERR2_reg_addr
);
2060 err1
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
2061 hba
->sli
.sli4
.ERR1_reg_addr
);
2062 err2
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
2063 hba
->sli
.sli4
.ERR2_reg_addr
);
2067 /* Timeout occurred */
2068 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_reset_failed_msg
,
2069 "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
2070 status
, err1
, err2
);
2072 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
2076 } /* emlxs_check_bootstrap_ready() */
2080 emlxs_issue_bootstrap_mb(emlxs_hba_t
*hba
, uint32_t tmo
)
2082 emlxs_port_t
*port
= &PPORT
;
2087 * This routine assumes the bootstrap mbox is loaded
2088 * with the mailbox command to be executed.
2090 * First, load the high 30 bits of bootstrap mailbox
2092 addr30
= (uint32_t)((hba
->sli
.sli4
.bootstrapmb
.phys
>>32) & 0xfffffffc);
2093 addr30
|= BMBX_ADDR_HI
;
2094 emlxs_sli4_write_mbdb(hba
, addr30
);
2096 tmo
= emlxs_check_bootstrap_ready(hba
, tmo
);
2101 /* Load the low 30 bits of bootstrap mailbox */
2102 addr30
= (uint32_t)((hba
->sli
.sli4
.bootstrapmb
.phys
>>2) & 0xfffffffc);
2103 emlxs_sli4_write_mbdb(hba
, addr30
);
2105 tmo
= emlxs_check_bootstrap_ready(hba
, tmo
);
2110 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
2112 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
2113 "BootstrapMB: %p Completed %08x %08x %08x",
2114 hba
->sli
.sli4
.bootstrapmb
.virt
,
2115 *iptr
, *(iptr
+1), *(iptr
+2));
2119 } /* emlxs_issue_bootstrap_mb() */
2123 emlxs_init_bootstrap_mb(emlxs_hba_t
*hba
)
2126 emlxs_port_t
*port
= &PPORT
;
2127 #endif /* FMA_SUPPORT */
2131 if (emlxs_check_hdw_ready(hba
)) {
2135 if (hba
->flag
& FC_BOOTSTRAPMB_INIT
) {
2136 return (0); /* Already initialized */
2139 /* NOTE: tmo is in 10ms ticks */
2140 tmo
= emlxs_check_bootstrap_ready(hba
, 3000);
2145 /* Issue FW_INITIALIZE command */
2147 /* Special words to initialize bootstrap mbox MUST be little endian */
2148 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
2149 *iptr
= LE_SWAP32(FW_INITIALIZE_WORD0
);
2150 *(iptr
+1) = LE_SWAP32(FW_INITIALIZE_WORD1
);
2152 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.bootstrapmb
.dma_handle
, 0,
2153 MAILBOX_CMD_BSIZE
, DDI_DMA_SYNC_FORDEV
);
2155 emlxs_data_dump(port
, "FW_INIT", (uint32_t *)iptr
, 6, 0);
2156 if (!emlxs_issue_bootstrap_mb(hba
, tmo
)) {
2161 if (emlxs_fm_check_dma_handle(hba
, hba
->sli
.sli4
.bootstrapmb
.dma_handle
)
2163 EMLXS_MSGF(EMLXS_CONTEXT
,
2164 &emlxs_invalid_dma_handle_msg
,
2165 "init_bootstrap_mb: hdl=%p",
2166 hba
->sli
.sli4
.bootstrapmb
.dma_handle
);
2170 hba
->flag
|= FC_BOOTSTRAPMB_INIT
;
2173 } /* emlxs_init_bootstrap_mb() */
2179 emlxs_sli4_hba_init(emlxs_hba_t
*hba
)
2183 emlxs_port_t
*vport
;
2184 emlxs_config_t
*cfg
= &CFG
;
2188 /* Restart the adapter */
2189 if (emlxs_sli4_hba_reset(hba
, 1, 0, 0)) {
2193 for (i
= 0; i
< hba
->chan_count
; i
++) {
2195 cp
->iopath
= (void *)&hba
->sli
.sli4
.wq
[i
];
2198 /* Initialize all the port objects */
2200 for (i
= 0; i
< MAX_VPORTS
; i
++) {
2205 vpip
= &vport
->VPIobj
;
2209 vpip
->state
= VPI_STATE_OFFLINE
;
2213 /* Set the max node count */
2214 if (hba
->max_nodes
== 0) {
2215 if (cfg
[CFG_NUM_NODES
].current
> 0) {
2216 hba
->max_nodes
= cfg
[CFG_NUM_NODES
].current
;
2218 hba
->max_nodes
= 4096;
2222 rc
= emlxs_init_bootstrap_mb(hba
);
2227 hba
->sli
.sli4
.cfgFCOE
.FCMap
[0] = FCOE_FCF_MAP0
;
2228 hba
->sli
.sli4
.cfgFCOE
.FCMap
[1] = FCOE_FCF_MAP1
;
2229 hba
->sli
.sli4
.cfgFCOE
.FCMap
[2] = FCOE_FCF_MAP2
;
2231 if ((hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) == SLI_INTF_IF_TYPE_0
) {
2232 /* Cache the UE MASK registers value for UE error detection */
2233 hba
->sli
.sli4
.ue_mask_lo
= ddi_get32(hba
->pci_acc_handle
,
2234 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_MASK_LO_OFFSET
));
2235 hba
->sli
.sli4
.ue_mask_hi
= ddi_get32(hba
->pci_acc_handle
,
2236 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_MASK_HI_OFFSET
));
2241 } /* emlxs_sli4_hba_init() */
2246 emlxs_sli4_hba_reset(emlxs_hba_t
*hba
, uint32_t restart
, uint32_t skip_post
,
2249 emlxs_port_t
*port
= &PPORT
;
2250 emlxs_port_t
*vport
;
2252 emlxs_config_t
*cfg
= &CFG
;
2261 uint8_t generate_event
= 0;
2263 if (!cfg
[CFG_RESET_ENABLE
].current
) {
2264 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_reset_failed_msg
,
2265 "Adapter reset disabled.");
2266 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
2271 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
2272 case SLI_INTF_IF_TYPE_0
:
2274 emlxs_sli4_hba_kill(hba
);
2277 * Initalize Hardware that will be used to bring
2280 rc
= emlxs_init_bootstrap_mb(hba
);
2286 bzero((void *)&mboxq
, sizeof (MAILBOXQ
));
2287 emlxs_mb_resetport(hba
, &mboxq
);
2290 if (emlxs_sli4_issue_mbox_cmd(hba
, &mboxq
,
2291 MBX_POLL
, 0) != MBX_SUCCESS
) {
2292 /* Timeout occurred */
2293 EMLXS_MSGF(EMLXS_CONTEXT
,
2294 &emlxs_reset_failed_msg
,
2296 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
2297 /* Log a dump event - not supported */
2301 if (emlxs_sli4_issue_mbox_cmd4quiesce(hba
, &mboxq
,
2302 MBX_POLL
, 0) != MBX_SUCCESS
) {
2303 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
2304 /* Log a dump event - not supported */
2308 emlxs_data_dump(port
, "resetPort", (uint32_t *)&mboxq
, 12, 0);
2311 case SLI_INTF_IF_TYPE_2
:
2313 emlxs_sli4_hba_kill(hba
);
2316 rc
= emlxs_check_hdw_ready(hba
);
2318 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_debug_msg
,
2319 "Adapter not ready for reset.");
2324 err1
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
2325 hba
->sli
.sli4
.ERR1_reg_addr
);
2326 err2
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
2327 hba
->sli
.sli4
.ERR2_reg_addr
);
2329 /* Don't generate an event if dump was forced */
2330 if ((err1
!= 0x2) || (err2
!= 0x2)) {
2335 /* Reset the port now */
2337 mutex_enter(&EMLXS_PORT_LOCK
);
2338 value
= SLI_CNTL_INIT_PORT
;
2340 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
2341 hba
->sli
.sli4
.CNTL_reg_addr
, value
);
2342 mutex_exit(&EMLXS_PORT_LOCK
);
2347 /* Reset the hba structure */
2348 hba
->flag
&= FC_RESET_MASK
;
2350 for (channelno
= 0; channelno
< hba
->chan_count
; channelno
++) {
2351 cp
= &hba
->chan
[channelno
];
2353 cp
->channelno
= channelno
;
2356 hba
->channel_tx_count
= 0;
2358 hba
->iodone_count
= 0;
2361 hba
->heartbeat_active
= 0;
2362 hba
->discovery_timer
= 0;
2363 hba
->linkup_timer
= 0;
2364 hba
->loopback_tics
= 0;
2366 /* Reset the port objects */
2367 for (i
= 0; i
< MAX_VPORTS
; i
++) {
2370 vport
->flag
&= EMLXS_PORT_RESET_MASK
;
2372 vport
->prev_did
= 0;
2373 vport
->lip_type
= 0;
2374 bzero(&vport
->fabric_sparam
, sizeof (SERV_PARM
));
2375 bzero(&vport
->prev_fabric_sparam
, sizeof (SERV_PARM
));
2377 bzero((caddr_t
)&vport
->node_base
, sizeof (NODELIST
));
2378 vport
->node_base
.nlp_Rpi
= 0;
2379 vport
->node_base
.nlp_DID
= 0xffffff;
2380 vport
->node_base
.nlp_list_next
= NULL
;
2381 vport
->node_base
.nlp_list_prev
= NULL
;
2382 vport
->node_base
.nlp_active
= 1;
2383 vport
->node_count
= 0;
2385 if (vport
->ub_count
< EMLXS_UB_TOKEN_OFFSET
) {
2386 vport
->ub_count
= EMLXS_UB_TOKEN_OFFSET
;
2390 if (emlxs_check_hdw_ready(hba
)) {
2394 if (generate_event
) {
2395 status
= emlxs_sli4_read_status(hba
);
2396 if (status
& SLI_STATUS_DUMP_IMAGE_PRESENT
) {
2397 emlxs_log_dump_event(port
, NULL
, 0);
2403 } /* emlxs_sli4_hba_reset */
2409 #define SGL_LAST 0x80
2413 emlxs_pkt_to_sgl(emlxs_port_t
*port
, fc_packet_t
*pkt
, ULP_SGE64
*sge
,
2414 uint32_t sgl_type
, uint32_t *pcnt
)
2417 emlxs_hba_t
*hba
= HBA
;
2418 #endif /* DEBUG_SGE */
2419 ddi_dma_cookie_t
*cp
;
2428 ULP_SGE64 stage_sge
;
2430 last
= sgl_type
& SGL_LAST
;
2431 sgl_type
&= ~SGL_LAST
;
2433 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2436 cp
= pkt
->pkt_cmd_cookie
;
2437 cookie_cnt
= pkt
->pkt_cmd_cookie_cnt
;
2438 size
= (int32_t)pkt
->pkt_cmdlen
;
2442 cp
= pkt
->pkt_resp_cookie
;
2443 cookie_cnt
= pkt
->pkt_resp_cookie_cnt
;
2444 size
= (int32_t)pkt
->pkt_rsplen
;
2449 cp
= pkt
->pkt_data_cookie
;
2450 cookie_cnt
= pkt
->pkt_data_cookie_cnt
;
2451 size
= (int32_t)pkt
->pkt_datalen
;
2461 cp
= &pkt
->pkt_cmd_cookie
;
2463 size
= (int32_t)pkt
->pkt_cmdlen
;
2467 cp
= &pkt
->pkt_resp_cookie
;
2469 size
= (int32_t)pkt
->pkt_rsplen
;
2474 cp
= &pkt
->pkt_data_cookie
;
2476 size
= (int32_t)pkt
->pkt_datalen
;
2482 #endif /* >= EMLXS_MODREV3 */
2484 stage_sge
.offset
= 0;
2488 for (i
= 0; i
< cookie_cnt
&& size
> 0; i
++, cp
++) {
2490 sge_size
= cp
->dmac_size
;
2491 sge_addr
= cp
->dmac_laddress
;
2492 while (sge_size
&& size
) {
2494 /* Copy staged SGE before we build next one */
2495 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
2496 (uint8_t *)sge
, sizeof (ULP_SGE64
));
2499 len
= MIN(EMLXS_MAX_SGE_SIZE
, sge_size
);
2500 len
= MIN(size
, len
);
2502 stage_sge
.addrHigh
=
2506 stage_sge
.length
= len
;
2507 if (sgl_type
== SGL_DATA
) {
2508 stage_sge
.offset
= cnt
;
2511 emlxs_data_dump(port
, "SGE", (uint32_t *)&stage_sge
,
2513 #endif /* DEBUG_SGE */
2525 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
, (uint8_t *)sge
,
2526 sizeof (ULP_SGE64
));
2535 } /* emlxs_pkt_to_sgl */
2540 emlxs_sli4_bde_setup(emlxs_port_t
*port
, emlxs_buf_t
*sbp
)
2542 emlxs_hba_t
*hba
= HBA
;
2548 ddi_dma_cookie_t
*cp_cmd
;
2549 ddi_dma_cookie_t
*cp_data
;
2554 iocbq
= (IOCBQ
*) &sbp
->iocbq
;
2556 pkt
= PRIV2PKT(sbp
);
2558 sge
= xrip
->SGList
.virt
;
2560 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2561 cp_cmd
= pkt
->pkt_cmd_cookie
;
2562 cp_data
= pkt
->pkt_data_cookie
;
2564 cp_cmd
= &pkt
->pkt_cmd_cookie
;
2565 cp_data
= &pkt
->pkt_data_cookie
;
2566 #endif /* >= EMLXS_MODREV3 */
2568 iocbq
= &sbp
->iocbq
;
2569 if (iocbq
->flag
& IOCB_FCP_CMD
) {
2571 if (pkt
->pkt_tran_type
== FC_PKT_OUTBOUND
) {
2576 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
, SGL_CMD
, &cmd_cnt
);
2582 if (pkt
->pkt_datalen
!= 0) {
2584 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2585 SGL_RESP
, &resp_cnt
);
2591 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2592 SGL_DATA
| SGL_LAST
, 0);
2597 if (hba
->sli
.sli4
.flag
& EMLXS_SLI4_PHON
) {
2598 sge_addr
= cp_data
->dmac_laddress
;
2599 wqe
->FirstData
.addrHigh
= PADDR_HI(sge_addr
);
2600 wqe
->FirstData
.addrLow
= PADDR_LO(sge_addr
);
2601 wqe
->FirstData
.tus
.f
.bdeSize
=
2606 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2607 SGL_RESP
| SGL_LAST
, &resp_cnt
);
2613 wqe
->un
.FcpCmd
.Payload
.addrHigh
=
2614 PADDR_HI(cp_cmd
->dmac_laddress
);
2615 wqe
->un
.FcpCmd
.Payload
.addrLow
=
2616 PADDR_LO(cp_cmd
->dmac_laddress
);
2617 wqe
->un
.FcpCmd
.Payload
.tus
.f
.bdeSize
= cmd_cnt
;
2618 wqe
->un
.FcpCmd
.PayloadLength
= cmd_cnt
+ resp_cnt
;
2622 if (pkt
->pkt_tran_type
== FC_PKT_OUTBOUND
) {
2624 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2625 SGL_CMD
| SGL_LAST
, &cmd_cnt
);
2631 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2638 sge
= emlxs_pkt_to_sgl(port
, pkt
, sge
,
2639 SGL_RESP
| SGL_LAST
, &resp_cnt
);
2643 wqe
->un
.GenReq
.PayloadLength
= cmd_cnt
;
2646 wqe
->un
.GenReq
.Payload
.addrHigh
=
2647 PADDR_HI(cp_cmd
->dmac_laddress
);
2648 wqe
->un
.GenReq
.Payload
.addrLow
=
2649 PADDR_LO(cp_cmd
->dmac_laddress
);
2650 wqe
->un
.GenReq
.Payload
.tus
.f
.bdeSize
= cmd_cnt
;
2653 } /* emlxs_sli4_bde_setup */
2661 emlxs_sli4_fct_bde_setup(emlxs_port_t
*port
, emlxs_buf_t
*sbp
)
2663 emlxs_hba_t
*hba
= HBA
;
2665 ULP_SGE64 stage_sge
;
2677 uint32_t *xrdy_vaddr
;
2678 stmf_data_buf_t
*dbuf
;
2680 iocbq
= &sbp
->iocbq
;
2681 iocb
= &iocbq
->iocb
;
2685 if (!sbp
->fct_buf
) {
2689 size
= sbp
->fct_buf
->db_data_size
;
2692 * The hardware will automaticlly round up
2696 * size = (size + 3) & 0xfffffffc;
2699 fct_mp
= (MATCHMAP
*)sbp
->fct_buf
->db_port_private
;
2701 if (sbp
->fct_buf
->db_sglist_length
!= 1) {
2702 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_fct_error_msg
,
2703 "fct_bde_setup: Only 1 sglist entry supported: %d",
2704 sbp
->fct_buf
->db_sglist_length
);
2708 sge
= xrip
->SGList
.virt
;
2710 if (iocb
->ULPCOMMAND
== CMD_FCP_TRECEIVE64_CX
) {
2712 mp
= emlxs_mem_buf_alloc(hba
, EMLXS_XFER_RDY_SIZE
);
2713 if (!mp
|| !mp
->virt
|| !mp
->phys
) {
2714 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_fct_error_msg
,
2715 "fct_bde_setup: Cannot allocate XRDY memory");
2718 /* Save the MATCHMAP info to free this memory later */
2721 /* Point to XRDY payload */
2722 xrdy_vaddr
= (uint32_t *)(mp
->virt
);
2724 /* Fill in burstsize in payload */
2726 *xrdy_vaddr
++ = LE_SWAP32(size
);
2729 /* First 2 SGEs are XRDY and SKIP */
2730 stage_sge
.addrHigh
= PADDR_HI(mp
->phys
);
2731 stage_sge
.addrLow
= PADDR_LO(mp
->phys
);
2732 stage_sge
.length
= EMLXS_XFER_RDY_SIZE
;
2733 stage_sge
.offset
= 0;
2738 wqe
->un
.FcpCmd
.Payload
.addrHigh
= stage_sge
.addrHigh
;
2739 wqe
->un
.FcpCmd
.Payload
.addrLow
= stage_sge
.addrLow
;
2740 wqe
->un
.FcpCmd
.Payload
.tus
.f
.bdeSize
= EMLXS_XFER_RDY_SIZE
;
2741 wqe
->un
.FcpCmd
.PayloadLength
= EMLXS_XFER_RDY_SIZE
;
2743 } else { /* CMD_FCP_TSEND64_CX */
2744 /* First 2 SGEs are SKIP */
2745 stage_sge
.addrHigh
= 0;
2746 stage_sge
.addrLow
= 0;
2747 stage_sge
.length
= 0;
2748 stage_sge
.offset
= 0;
2749 stage_sge
.type
= EMLXS_SGE_TYPE_SKIP
;
2753 wqe
->un
.FcpCmd
.Payload
.addrHigh
= PADDR_HI(fct_mp
->phys
);
2754 wqe
->un
.FcpCmd
.Payload
.addrLow
= PADDR_LO(fct_mp
->phys
);
2756 /* The BDE should match the contents of the first SGE payload */
2757 len
= MIN(EMLXS_MAX_SGE_SIZE
, size
);
2758 wqe
->un
.FcpCmd
.Payload
.tus
.f
.bdeSize
= len
;
2760 /* The PayloadLength should be set to 0 for TSEND64. */
2761 wqe
->un
.FcpCmd
.PayloadLength
= 0;
2764 dbuf
= sbp
->fct_buf
;
2766 * TotalTransferCount equals to Relative Offset field (Word 4)
2767 * in both TSEND64 and TRECEIVE64 WQE.
2769 wqe
->un
.FcpCmd
.TotalTransferCount
= dbuf
->db_relative_offset
;
2771 /* Copy staged SGE into SGL */
2772 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
2773 (uint8_t *)sge
, sizeof (ULP_SGE64
));
2776 stage_sge
.addrHigh
= 0;
2777 stage_sge
.addrLow
= 0;
2778 stage_sge
.length
= 0;
2779 stage_sge
.offset
= 0;
2780 stage_sge
.type
= EMLXS_SGE_TYPE_SKIP
;
2783 /* Copy staged SGE into SGL */
2784 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
2785 (uint8_t *)sge
, sizeof (ULP_SGE64
));
2789 sge_addr
= fct_mp
->phys
;
2795 /* Copy staged SGE before we build next one */
2796 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
2797 (uint8_t *)sge
, sizeof (ULP_SGE64
));
2801 len
= MIN(EMLXS_MAX_SGE_SIZE
, sge_size
);
2803 stage_sge
.addrHigh
= PADDR_HI(sge_addr
);
2804 stage_sge
.addrLow
= PADDR_LO(sge_addr
);
2805 stage_sge
.length
= len
;
2806 stage_sge
.offset
= cnt
;
2807 stage_sge
.type
= EMLXS_SGE_TYPE_DATA
;
2816 if (hba
->sli
.sli4
.flag
& EMLXS_SLI4_PHON
) {
2817 wqe
->FirstData
.addrHigh
= stage_sge
.addrHigh
;
2818 wqe
->FirstData
.addrLow
= stage_sge
.addrLow
;
2819 wqe
->FirstData
.tus
.f
.bdeSize
= stage_sge
.length
;
2821 /* Copy staged SGE into SGL */
2822 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
2823 (uint8_t *)sge
, sizeof (ULP_SGE64
));
2827 } /* emlxs_sli4_fct_bde_setup */
2828 #endif /* SFCT_SUPPORT */
2832 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t
*hba
, CHANNEL
*cp
, IOCBQ
*iocbq
)
2834 emlxs_port_t
*port
= &PPORT
;
2839 emlxs_wqe_t
*wqeslot
;
2845 #ifdef NODE_THROTTLE_SUPPORT
2846 int32_t node_throttle
;
2847 NODELIST
*marked_node
= NULL
;
2848 #endif /* NODE_THROTTLE_SUPPORT */
2851 channelno
= cp
->channelno
;
2852 wq
= (WQ_DESC_t
*)cp
->iopath
;
2854 #ifdef DEBUG_FASTPATH
2855 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
2856 "ISSUE WQE channel: %x %p", channelno
, wq
);
2857 #endif /* DEBUG_FASTPATH */
2861 /* Check if FCP ring and adapter is not ready */
2862 /* We may use any ring for FCP_CMD */
2863 if (iocbq
&& (iocbq
->flag
& IOCB_FCP_CMD
) && (hba
->state
!= FC_READY
)) {
2864 if (!(iocbq
->flag
& IOCB_SPECIAL
) || !iocbq
->port
||
2865 (((emlxs_port_t
*)iocbq
->port
)->mode
== MODE_INITIATOR
)) {
2866 emlxs_tx_put(iocbq
, 1);
2871 /* Attempt to acquire CMD_RING lock */
2872 if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno
)) == 0) {
2873 /* Queue it for later */
2875 if ((hba
->io_count
-
2876 hba
->channel_tx_count
) > 10) {
2877 emlxs_tx_put(iocbq
, 1);
2881 mutex_enter(&EMLXS_QUE_LOCK(channelno
));
2887 /* EMLXS_QUE_LOCK acquired */
2889 /* Throttle check only applies to non special iocb */
2890 if (iocbq
&& (!(iocbq
->flag
& IOCB_SPECIAL
))) {
2891 /* Check if HBA is full */
2892 throttle
= hba
->io_throttle
- hba
->io_active
;
2893 if (throttle
<= 0) {
2894 /* Hitting adapter throttle limit */
2895 /* Queue it for later */
2897 emlxs_tx_put(iocbq
, 1);
2904 /* Check to see if we have room for this WQE */
2905 next_wqe
= wq
->host_index
+ 1;
2906 if (next_wqe
>= wq
->max_index
) {
2910 if (next_wqe
== wq
->port_index
) {
2911 /* Queue it for later */
2913 emlxs_tx_put(iocbq
, 1);
2919 * We have a command ring slot available
2920 * Make sure we have an iocb to send
2923 mutex_enter(&EMLXS_TX_CHANNEL_LOCK
);
2925 /* Check if the ring already has iocb's waiting */
2926 if (cp
->nodeq
.q_first
!= NULL
) {
2927 /* Put the current iocbq on the tx queue */
2928 emlxs_tx_put(iocbq
, 0);
2931 * Attempt to replace it with the next iocbq
2934 iocbq
= emlxs_tx_get(cp
, 0);
2937 mutex_exit(&EMLXS_TX_CHANNEL_LOCK
);
2939 iocbq
= emlxs_tx_get(cp
, 1);
2943 /* Process each iocbq */
2947 #ifdef NODE_THROTTLE_SUPPORT
2948 if (sbp
&& sbp
->node
&& sbp
->node
->io_throttle
) {
2949 node_throttle
= sbp
->node
->io_throttle
-
2950 sbp
->node
->io_active
;
2951 if (node_throttle
<= 0) {
2953 /* Queue this iocb and get next iocb from */
2957 marked_node
= sbp
->node
;
2960 mutex_enter(&EMLXS_TX_CHANNEL_LOCK
);
2961 emlxs_tx_put(iocbq
, 0);
2963 if (cp
->nodeq
.q_first
== marked_node
) {
2964 mutex_exit(&EMLXS_TX_CHANNEL_LOCK
);
2968 iocbq
= emlxs_tx_get(cp
, 0);
2969 mutex_exit(&EMLXS_TX_CHANNEL_LOCK
);
2974 #endif /* NODE_THROTTLE_SUPPORT */
2977 #ifdef DEBUG_FASTPATH
2978 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
2979 "ISSUE QID %d WQE iotag:%x xri:%d", wq
->qid
,
2980 wqe
->RequestTag
, wqe
->XRITag
);
2981 #endif /* DEBUG_FASTPATH */
2984 /* If exchange removed after wqe was prep'ed, drop it */
2986 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
2987 "Xmit WQE iotag:%x xri:%d aborted",
2988 wqe
->RequestTag
, wqe
->XRITag
);
2990 /* Get next iocb from the tx queue */
2991 iocbq
= emlxs_tx_get(cp
, 1);
2995 if (sbp
->pkt_flags
& PACKET_DELAY_REQUIRED
) {
2998 if ((channelno
== hba
->channel_els
) &&
2999 !(iocbq
->flag
& IOCB_FCP_CMD
)) {
3000 drv_usecwait(100000);
3002 drv_usecwait(20000);
3006 /* Check for ULP pkt request */
3007 mutex_enter(&sbp
->mtx
);
3009 if (sbp
->node
== NULL
) {
3010 /* Set node to base node by default */
3011 iocbq
->node
= (void *)&port
->node_base
;
3012 sbp
->node
= (void *)&port
->node_base
;
3015 sbp
->pkt_flags
|= PACKET_IN_CHIPQ
;
3016 mutex_exit(&sbp
->mtx
);
3018 atomic_inc_32(&hba
->io_active
);
3019 #ifdef NODE_THROTTLE_SUPPORT
3021 atomic_inc_32(&sbp
->node
->io_active
);
3023 #endif /* NODE_THROTTLE_SUPPORT */
3025 sbp
->xrip
->flag
|= EMLXS_XRI_PENDING_IO
;
3029 emlxs_fct_io_trace(port
, sbp
->fct_cmd
,
3030 EMLXS_FCT_IOCB_ISSUED
);
3031 emlxs_fct_io_trace(port
, sbp
->fct_cmd
,
3034 #endif /* FCT_IO_TRACE */
3035 #endif /* SFCT_SUPPORT */
3036 cp
->hbaSendCmd_sbp
++;
3037 iocbq
->channel
= cp
;
3045 * At this point, we have a command ring slot available
3046 * and an iocb to send
3048 wq
->release_depth
--;
3049 if (wq
->release_depth
== 0) {
3050 wq
->release_depth
= WQE_RELEASE_DEPTH
;
3054 HBASTATS
.IocbIssued
[channelno
]++;
3058 wqeslot
= (emlxs_wqe_t
*)wq
->addr
.virt
;
3059 wqeslot
+= wq
->host_index
;
3061 wqe
->CQId
= wq
->cqid
;
3062 if (hba
->sli
.sli4
.param
.PHWQ
) {
3063 WQE_PHWQ_WQID(wqe
, wq
->qid
);
3065 BE_SWAP32_BCOPY((uint8_t *)wqe
, (uint8_t *)wqeslot
,
3066 sizeof (emlxs_wqe_t
));
3068 emlxs_data_dump(port
, "WQE", (uint32_t *)wqe
, 18, 0);
3069 #endif /* DEBUG_WQE */
3070 offset
= (off_t
)((uint64_t)((unsigned long)
3072 (uint64_t)((unsigned long)
3073 hba
->sli
.sli4
.slim2
.virt
));
3075 EMLXS_MPDATA_SYNC(wq
->addr
.dma_handle
, offset
,
3076 4096, DDI_DMA_SYNC_FORDEV
);
3078 /* Ring the WQ Doorbell */
3080 wqdb
|= ((1 << 24) | (wq
->host_index
<< 16));
3083 * After this, the sbp / iocb / wqe should not be
3084 * accessed in the xmit path.
3087 emlxs_sli4_write_wqdb(hba
, wqdb
);
3088 wq
->host_index
= next_wqe
;
3090 #ifdef DEBUG_FASTPATH
3091 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
3092 "WQ RING: %08x", wqdb
);
3093 #endif /* DEBUG_FASTPATH */
3096 emlxs_mem_put(hba
, MEM_IOCB
, (void *)iocbq
);
3099 if (iocbq
&& (!(flag
& IOCB_SPECIAL
))) {
3100 /* Check if HBA is full */
3101 throttle
= hba
->io_throttle
- hba
->io_active
;
3102 if (throttle
<= 0) {
3107 /* Check to see if we have room for another WQE */
3109 if (next_wqe
>= wq
->max_index
) {
3113 if (next_wqe
== wq
->port_index
) {
3114 /* Queue it for later */
3118 /* Get the next iocb from the tx queue if there is one */
3119 iocbq
= emlxs_tx_get(cp
, 1);
3122 mutex_exit(&EMLXS_QUE_LOCK(channelno
));
3128 if (throttle
<= 0) {
3129 HBASTATS
.IocbThrottled
++;
3131 HBASTATS
.IocbRingFull
[channelno
]++;
3134 mutex_exit(&EMLXS_QUE_LOCK(channelno
));
3138 } /* emlxs_sli4_issue_iocb_cmd() */
3143 emlxs_sli4_issue_mq(emlxs_port_t
*port
, MAILBOX4
*mqe
, MAILBOX
*mb
,
3146 emlxs_hba_t
*hba
= HBA
;
3154 mbq
= (MAILBOXQ
*)mb
;
3155 mb4
= (MAILBOX4
*)mb
;
3156 mp
= (MATCHMAP
*) mbq
->nonembed
;
3157 hba
->mbox_mqe
= (void *)mqe
;
3159 if ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ||
3160 (mb4
->un
.varSLIConfig
.be
.embedded
)) {
3162 * If this is an embedded mbox, everything should fit
3163 * into the mailbox area.
3165 BE_SWAP32_BCOPY((uint8_t *)mb
, (uint8_t *)mqe
,
3166 MAILBOX_CMD_SLI4_BSIZE
);
3168 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.mq
.addr
.dma_handle
, 0,
3169 4096, DDI_DMA_SYNC_FORDEV
);
3171 if (mb
->mbxCommand
!= MBX_HEARTBEAT
) {
3172 emlxs_data_dump(port
, "MBOX CMD", (uint32_t *)mqe
,
3176 /* SLI_CONFIG and non-embedded */
3179 * If this is not embedded, the MQ area
3180 * MUST contain a SGE pointer to a larger area for the
3181 * non-embedded mailbox command.
3182 * mp will point to the actual mailbox command which
3183 * should be copied into the non-embedded area.
3185 mb4
->un
.varSLIConfig
.be
.sge_cnt
= 1;
3186 mb4
->un
.varSLIConfig
.be
.payload_length
= mp
->size
;
3187 iptr
= (uint32_t *)&mb4
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
;
3188 *iptr
++ = (uint32_t)PADDR_LO(mp
->phys
);
3189 *iptr
++ = (uint32_t)PADDR_HI(mp
->phys
);
3192 BE_SWAP32_BUFFER(mp
->virt
, mp
->size
);
3194 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
3195 DDI_DMA_SYNC_FORDEV
);
3197 BE_SWAP32_BCOPY((uint8_t *)mb
, (uint8_t *)mqe
,
3198 MAILBOX_CMD_SLI4_BSIZE
);
3200 offset
= (off_t
)((uint64_t)((unsigned long)
3201 hba
->sli
.sli4
.mq
.addr
.virt
) -
3202 (uint64_t)((unsigned long)
3203 hba
->sli
.sli4
.slim2
.virt
));
3205 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.mq
.addr
.dma_handle
, offset
,
3206 4096, DDI_DMA_SYNC_FORDEV
);
3208 emlxs_data_dump(port
, "MBOX EXT", (uint32_t *)mqe
, 12, 0);
3209 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3210 "Extension Addr %p %p", mp
->phys
, (uint32_t *)(mp
->virt
));
3211 emlxs_data_dump(port
, "EXT AREA", (uint32_t *)mp
->virt
, 24, 0);
3214 /* Ring the MQ Doorbell */
3215 mqdb
= hba
->sli
.sli4
.mq
.qid
;
3216 mqdb
|= ((1 << MQ_DB_POP_SHIFT
) & MQ_DB_POP_MASK
);
3218 if (mb
->mbxCommand
!= MBX_HEARTBEAT
) {
3219 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
3220 "MQ RING: %08x", mqdb
);
3223 emlxs_sli4_write_mqdb(hba
, mqdb
);
3225 return (MBX_SUCCESS
);
3227 } /* emlxs_sli4_issue_mq() */
3232 emlxs_sli4_issue_bootstrap(emlxs_hba_t
*hba
, MAILBOX
*mb
, uint32_t tmo
)
3234 emlxs_port_t
*port
= &PPORT
;
3237 MATCHMAP
*mp
= NULL
;
3241 mbq
= (MAILBOXQ
*)mb
;
3242 mb4
= (MAILBOX4
*)mb
;
3243 mp
= (MATCHMAP
*) mbq
->nonembed
;
3244 hba
->mbox_mqe
= hba
->sli
.sli4
.bootstrapmb
.virt
;
3246 if ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ||
3247 (mb4
->un
.varSLIConfig
.be
.embedded
)) {
3249 * If this is an embedded mbox, everything should fit
3250 * into the bootstrap mailbox area.
3252 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
3253 BE_SWAP32_BCOPY((uint8_t *)mb
, (uint8_t *)iptr
,
3254 MAILBOX_CMD_SLI4_BSIZE
);
3256 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.bootstrapmb
.dma_handle
, 0,
3257 MAILBOX_CMD_SLI4_BSIZE
, DDI_DMA_SYNC_FORDEV
);
3258 emlxs_data_dump(port
, "MBOX CMD", iptr
, 18, 0);
3261 * If this is not embedded, the bootstrap mailbox area
3262 * MUST contain a SGE pointer to a larger area for the
3263 * non-embedded mailbox command.
3264 * mp will point to the actual mailbox command which
3265 * should be copied into the non-embedded area.
3268 mb4
->un
.varSLIConfig
.be
.sge_cnt
= 1;
3269 mb4
->un
.varSLIConfig
.be
.payload_length
= mp
->size
;
3270 iptr
= (uint32_t *)&mb4
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
;
3271 *iptr
++ = (uint32_t)PADDR_LO(mp
->phys
);
3272 *iptr
++ = (uint32_t)PADDR_HI(mp
->phys
);
3275 BE_SWAP32_BUFFER(mp
->virt
, mp
->size
);
3277 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
3278 DDI_DMA_SYNC_FORDEV
);
3280 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
3281 BE_SWAP32_BCOPY((uint8_t *)mb
, (uint8_t *)iptr
,
3282 MAILBOX_CMD_SLI4_BSIZE
);
3284 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.bootstrapmb
.dma_handle
, 0,
3285 EMLXS_BOOTSTRAP_MB_SIZE
+ MBOX_EXTENSION_SIZE
,
3286 DDI_DMA_SYNC_FORDEV
);
3288 emlxs_data_dump(port
, "MBOX EXT", iptr
, 12, 0);
3289 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3290 "Extension Addr %p %p", mp
->phys
,
3291 (uint32_t *)((uint8_t *)mp
->virt
));
3292 iptr
= (uint32_t *)((uint8_t *)mp
->virt
);
3293 emlxs_data_dump(port
, "EXT AREA", (uint32_t *)mp
->virt
, 24, 0);
3297 /* NOTE: tmo is in 10ms ticks */
3298 if (!emlxs_issue_bootstrap_mb(hba
, tmo
)) {
3299 return (MBX_TIMEOUT
);
3302 if ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ||
3303 (mb4
->un
.varSLIConfig
.be
.embedded
)) {
3304 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.bootstrapmb
.dma_handle
, 0,
3305 MAILBOX_CMD_SLI4_BSIZE
, DDI_DMA_SYNC_FORKERNEL
);
3307 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
3308 BE_SWAP32_BCOPY((uint8_t *)iptr
, (uint8_t *)mb
,
3309 MAILBOX_CMD_SLI4_BSIZE
);
3311 emlxs_data_dump(port
, "MBOX CMP", iptr
, 18, 0);
3314 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.bootstrapmb
.dma_handle
, 0,
3315 EMLXS_BOOTSTRAP_MB_SIZE
+ MBOX_EXTENSION_SIZE
,
3316 DDI_DMA_SYNC_FORKERNEL
);
3318 EMLXS_MPDATA_SYNC(mp
->dma_handle
, 0, mp
->size
,
3319 DDI_DMA_SYNC_FORKERNEL
);
3321 BE_SWAP32_BUFFER(mp
->virt
, mp
->size
);
3323 iptr
= (uint32_t *)hba
->sli
.sli4
.bootstrapmb
.virt
;
3324 BE_SWAP32_BCOPY((uint8_t *)iptr
, (uint8_t *)mb
,
3325 MAILBOX_CMD_SLI4_BSIZE
);
3327 emlxs_data_dump(port
, "MBOX CMP", iptr
, 12, 0);
3328 iptr
= (uint32_t *)((uint8_t *)mp
->virt
);
3329 emlxs_data_dump(port
, "EXT AREA", (uint32_t *)iptr
, 24, 0);
3333 if (nonembed
&& mp
) {
3334 if (emlxs_fm_check_dma_handle(hba
, mp
->dma_handle
)
3336 EMLXS_MSGF(EMLXS_CONTEXT
,
3337 &emlxs_invalid_dma_handle_msg
,
3338 "sli4_issue_bootstrap: mp_hdl=%p",
3340 return (MBXERR_DMA_ERROR
);
3344 if (emlxs_fm_check_dma_handle(hba
,
3345 hba
->sli
.sli4
.bootstrapmb
.dma_handle
)
3347 EMLXS_MSGF(EMLXS_CONTEXT
,
3348 &emlxs_invalid_dma_handle_msg
,
3349 "sli4_issue_bootstrap: hdl=%p",
3350 hba
->sli
.sli4
.bootstrapmb
.dma_handle
);
3351 return (MBXERR_DMA_ERROR
);
3355 return (MBX_SUCCESS
);
3357 } /* emlxs_sli4_issue_bootstrap() */
3362 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
, int32_t flag
,
3368 mbox_rsp_hdr_t
*hdr_rsp
;
3379 port
= (emlxs_port_t
*)mbq
->port
;
3381 mb4
= (MAILBOX4
*)mbq
;
3382 mb
= (MAILBOX
*)mbq
;
3384 mb
->mbxStatus
= MBX_SUCCESS
;
3387 /* Check for minimum timeouts */
3388 switch (mb
->mbxCommand
) {
3389 /* Mailbox commands that erase/write flash */
3391 case MBX_UPDATE_CFG
:
3393 case MBX_LOAD_EXP_ROM
:
3395 case MBX_FLASH_WR_ULA
:
3396 case MBX_DEL_LD_ENTRY
:
3398 case MBX_DUMP_MEMORY
:
3399 case MBX_WRITE_VPARMS
:
3400 case MBX_ACCESS_VDATA
:
3406 case MBX_SLI_CONFIG
: {
3407 mbox_req_hdr_t
*hdr_req
;
3409 hdr_req
= (mbox_req_hdr_t
*)
3410 &mb4
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
;
3412 if (hdr_req
->subsystem
== IOCTL_SUBSYSTEM_COMMON
) {
3413 switch (hdr_req
->opcode
) {
3414 case COMMON_OPCODE_WRITE_OBJ
:
3415 case COMMON_OPCODE_READ_OBJ
:
3416 case COMMON_OPCODE_READ_OBJ_LIST
:
3417 case COMMON_OPCODE_DELETE_OBJ
:
3418 case COMMON_OPCODE_SET_BOOT_CFG
:
3419 case COMMON_OPCODE_GET_PROFILE_CFG
:
3420 case COMMON_OPCODE_SET_PROFILE_CFG
:
3421 case COMMON_OPCODE_GET_PROFILE_LIST
:
3422 case COMMON_OPCODE_SET_ACTIVE_PROFILE
:
3423 case COMMON_OPCODE_GET_PROFILE_CAPS
:
3424 case COMMON_OPCODE_GET_MR_PROFILE_CAPS
:
3425 case COMMON_OPCODE_SET_MR_PROFILE_CAPS
:
3426 case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG
:
3427 case COMMON_OPCODE_SEND_ACTIVATION
:
3428 case COMMON_OPCODE_RESET_LICENSES
:
3429 case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1
:
3430 case COMMON_OPCODE_GET_VPD_DATA
:
3440 } else if (hdr_req
->subsystem
== IOCTL_SUBSYSTEM_FCOE
) {
3441 switch (hdr_req
->opcode
) {
3442 case FCOE_OPCODE_SET_FCLINK_SETTINGS
:
3459 * Also: VENDOR_MANAGE_FFV (0x13, 0x02) (not currently used)
3471 /* Convert tmo seconds to 10 millisecond tics */
3472 tmo_local
= tmo
* 100;
3474 mutex_enter(&EMLXS_PORT_LOCK
);
3476 /* Adjust wait flag */
3477 if (flag
!= MBX_NOWAIT
) {
3478 if (hba
->sli
.sli4
.flag
& EMLXS_SLI4_INTR_ENABLED
) {
3484 /* Must have interrupts enabled to perform MBX_NOWAIT */
3485 if (!(hba
->sli
.sli4
.flag
& EMLXS_SLI4_INTR_ENABLED
)) {
3487 mb
->mbxStatus
= MBX_HARDWARE_ERROR
;
3488 mutex_exit(&EMLXS_PORT_LOCK
);
3490 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3491 "Interrupts disabled. %s failed.",
3492 emlxs_mb_cmd_xlate(mb
->mbxCommand
));
3494 return (MBX_HARDWARE_ERROR
);
3498 /* Check for hardware error ; special case SLI_CONFIG */
3499 if ((hba
->flag
& FC_HARDWARE_ERROR
) &&
3500 ! ((mb4
->mbxCommand
== MBX_SLI_CONFIG
) &&
3501 (mb4
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.opcode
==
3502 COMMON_OPCODE_RESET
))) {
3503 mb
->mbxStatus
= MBX_HARDWARE_ERROR
;
3505 mutex_exit(&EMLXS_PORT_LOCK
);
3507 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3508 "Hardware error reported. %s failed. status=%x mb=%p",
3509 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
->mbxStatus
, mb
);
3511 return (MBX_HARDWARE_ERROR
);
3514 if (hba
->mbox_queue_flag
) {
3515 /* If we are not polling, then queue it for later */
3516 if (flag
== MBX_NOWAIT
) {
3517 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3518 "Busy. %s: mb=%p NoWait.",
3519 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
);
3521 emlxs_mb_put(hba
, mbq
);
3523 HBASTATS
.MboxBusy
++;
3525 mutex_exit(&EMLXS_PORT_LOCK
);
3530 while (hba
->mbox_queue_flag
) {
3531 mutex_exit(&EMLXS_PORT_LOCK
);
3533 if (tmo_local
-- == 0) {
3534 EMLXS_MSGF(EMLXS_CONTEXT
,
3535 &emlxs_mbox_event_msg
,
3536 "Timeout. %s: mb=%p tmo=%d Waiting.",
3537 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
,
3540 /* Non-lethalStatus mailbox timeout */
3541 /* Does not indicate a hardware error */
3542 mb
->mbxStatus
= MBX_TIMEOUT
;
3543 return (MBX_TIMEOUT
);
3547 mutex_enter(&EMLXS_PORT_LOCK
);
3549 /* Check for hardware error ; special case SLI_CONFIG */
3550 if ((hba
->flag
& FC_HARDWARE_ERROR
) &&
3551 ! ((mb4
->mbxCommand
== MBX_SLI_CONFIG
) &&
3552 (mb4
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.opcode
==
3553 COMMON_OPCODE_RESET
))) {
3554 mb
->mbxStatus
= MBX_HARDWARE_ERROR
;
3556 mutex_exit(&EMLXS_PORT_LOCK
);
3558 EMLXS_MSGF(EMLXS_CONTEXT
,
3559 &emlxs_mbox_detail_msg
,
3560 "Hardware error reported. %s failed. "
3562 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
3565 return (MBX_HARDWARE_ERROR
);
3570 /* Initialize mailbox area */
3571 emlxs_mb_init(hba
, mbq
, flag
, tmo
);
3573 if (mb
->mbxCommand
== MBX_DOWN_LINK
) {
3574 hba
->sli
.sli4
.flag
|= EMLXS_SLI4_DOWN_LINK
;
3577 mutex_exit(&EMLXS_PORT_LOCK
);
3581 if (mb
->mbxCommand
!= MBX_HEARTBEAT
) {
3582 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
3583 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3584 EMLXS_MSGF(EMLXS_CONTEXT
,
3585 &emlxs_mbox_detail_msg
,
3586 "Sending. %s: mb=%p NoWait. embedded %d",
3587 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
,
3588 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3589 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3593 iptr
= hba
->sli
.sli4
.mq
.addr
.virt
;
3594 iptr
+= (hba
->sli
.sli4
.mq
.host_index
* MAILBOX_CMD_SLI4_WSIZE
);
3595 hba
->sli
.sli4
.mq
.host_index
++;
3596 if (hba
->sli
.sli4
.mq
.host_index
>= hba
->sli
.sli4
.mq
.max_index
) {
3597 hba
->sli
.sli4
.mq
.host_index
= 0;
3601 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3602 "BDE virt %p phys %p size x%x",
3603 ((MATCHMAP
*)mbq
->bp
)->virt
,
3604 ((MATCHMAP
*)mbq
->bp
)->phys
,
3605 ((MATCHMAP
*)mbq
->bp
)->size
);
3606 emlxs_data_dump(port
, "DATA",
3607 (uint32_t *)(((MATCHMAP
*)mbq
->bp
)->virt
), 30, 0);
3609 rc
= emlxs_sli4_issue_mq(port
, (MAILBOX4
*)iptr
, mb
, tmo_local
);
3613 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
3614 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3615 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3616 "Sending. %s: mb=%p Poll. embedded %d",
3617 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
,
3618 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3619 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3622 rc
= emlxs_sli4_issue_bootstrap(hba
, mb
, tmo_local
);
3624 /* Clean up the mailbox area */
3625 if (rc
== MBX_TIMEOUT
) {
3626 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3627 "Timeout. %s: mb=%p tmo=%x Poll. embedded %d",
3628 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
, tmo
,
3629 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3630 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3632 hba
->flag
|= FC_MBOX_TIMEOUT
;
3633 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
3634 emlxs_mb_fini(hba
, NULL
, MBX_TIMEOUT
);
3637 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
3638 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3639 EMLXS_MSGF(EMLXS_CONTEXT
,
3640 &emlxs_mbox_detail_msg
,
3641 "Completed. %s: mb=%p status=%x Poll. "
3643 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
, rc
,
3644 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3645 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3648 /* Process the result */
3649 if (!(mbq
->flag
& MBQ_PASSTHRU
)) {
3650 if (mbq
->mbox_cmpl
) {
3651 (void) (mbq
->mbox_cmpl
)(hba
, mbq
);
3655 emlxs_mb_fini(hba
, NULL
, mb
->mbxStatus
);
3658 mp
= (MATCHMAP
*)mbq
->nonembed
;
3660 hdr_rsp
= (mbox_rsp_hdr_t
*)mp
->virt
;
3661 if (hdr_rsp
->status
) {
3662 EMLXS_MSGF(EMLXS_CONTEXT
,
3663 &emlxs_mbox_detail_msg
,
3664 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3665 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
3666 hdr_rsp
->status
, hdr_rsp
->extra_status
);
3668 mb
->mbxStatus
= MBX_NONEMBED_ERROR
;
3673 /* Attempt to send pending mailboxes */
3674 mbq
= (MAILBOXQ
*)emlxs_mb_get(hba
);
3676 /* Attempt to send pending mailboxes */
3677 i
= emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_NOWAIT
, 0);
3678 if ((i
!= MBX_BUSY
) && (i
!= MBX_SUCCESS
)) {
3679 emlxs_mem_put(hba
, MEM_MBOX
, (void *)mbq
);
3685 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
3686 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3687 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3688 "Sending. %s: mb=%p Sleep. embedded %d",
3689 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
,
3690 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3691 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3694 iptr
= hba
->sli
.sli4
.mq
.addr
.virt
;
3695 iptr
+= (hba
->sli
.sli4
.mq
.host_index
* MAILBOX_CMD_SLI4_WSIZE
);
3696 hba
->sli
.sli4
.mq
.host_index
++;
3697 if (hba
->sli
.sli4
.mq
.host_index
>= hba
->sli
.sli4
.mq
.max_index
) {
3698 hba
->sli
.sli4
.mq
.host_index
= 0;
3701 rc
= emlxs_sli4_issue_mq(port
, (MAILBOX4
*)iptr
, mb
, tmo_local
);
3703 if (rc
!= MBX_SUCCESS
) {
3707 /* Wait for completion */
3708 /* The driver clock is timing the mailbox. */
3710 mutex_enter(&EMLXS_MBOX_LOCK
);
3711 while (!(mbq
->flag
& MBQ_COMPLETED
)) {
3712 cv_wait(&EMLXS_MBOX_CV
, &EMLXS_MBOX_LOCK
);
3714 mutex_exit(&EMLXS_MBOX_LOCK
);
3716 mp
= (MATCHMAP
*)mbq
->nonembed
;
3718 hdr_rsp
= (mbox_rsp_hdr_t
*)mp
->virt
;
3719 if (hdr_rsp
->status
) {
3720 EMLXS_MSGF(EMLXS_CONTEXT
,
3721 &emlxs_mbox_detail_msg
,
3722 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3723 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
3724 hdr_rsp
->status
, hdr_rsp
->extra_status
);
3726 mb
->mbxStatus
= MBX_NONEMBED_ERROR
;
3731 if (rc
== MBX_TIMEOUT
) {
3732 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
3733 "Timeout. %s: mb=%p tmo=%x Sleep. embedded %d",
3734 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
, tmo
,
3735 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3736 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3738 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
3739 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3740 EMLXS_MSGF(EMLXS_CONTEXT
,
3741 &emlxs_mbox_detail_msg
,
3742 "Completed. %s: mb=%p status=%x Sleep. "
3744 emlxs_mb_cmd_xlate(mb
->mbxCommand
), mb
, rc
,
3745 ((mb
->mbxCommand
!= MBX_SLI_CONFIG
) ? 1 :
3746 (mb4
->un
.varSLIConfig
.be
.embedded
)));
3754 } /* emlxs_sli4_issue_mbox_cmd() */
3760 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
, int32_t flag
,
3763 emlxs_port_t
*port
= &PPORT
;
3765 mbox_rsp_hdr_t
*hdr_rsp
;
3770 mb
= (MAILBOX
*)mbq
;
3772 mb
->mbxStatus
= MBX_SUCCESS
;
3779 /* Convert tmo seconds to 10 millisecond tics */
3780 tmo_local
= tmo
* 100;
3784 /* Check for hardware error */
3785 if (hba
->flag
& FC_HARDWARE_ERROR
) {
3786 mb
->mbxStatus
= MBX_HARDWARE_ERROR
;
3787 return (MBX_HARDWARE_ERROR
);
3790 /* Initialize mailbox area */
3791 emlxs_mb_init(hba
, mbq
, flag
, tmo
);
3797 rc
= emlxs_sli4_issue_bootstrap(hba
, mb
, tmo_local
);
3799 /* Clean up the mailbox area */
3800 if (rc
== MBX_TIMEOUT
) {
3801 hba
->flag
|= FC_MBOX_TIMEOUT
;
3802 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
3803 emlxs_mb_fini(hba
, NULL
, MBX_TIMEOUT
);
3806 /* Process the result */
3807 if (!(mbq
->flag
& MBQ_PASSTHRU
)) {
3808 if (mbq
->mbox_cmpl
) {
3809 (void) (mbq
->mbox_cmpl
)(hba
, mbq
);
3813 emlxs_mb_fini(hba
, NULL
, mb
->mbxStatus
);
3816 mp
= (MATCHMAP
*)mbq
->nonembed
;
3818 hdr_rsp
= (mbox_rsp_hdr_t
*)mp
->virt
;
3819 if (hdr_rsp
->status
) {
3820 EMLXS_MSGF(EMLXS_CONTEXT
,
3821 &emlxs_mbox_detail_msg
,
3822 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3823 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
3824 hdr_rsp
->status
, hdr_rsp
->extra_status
);
3826 mb
->mbxStatus
= MBX_NONEMBED_ERROR
;
3836 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
3843 emlxs_sli4_prep_fct_iocb(emlxs_port_t
*port
, emlxs_buf_t
*cmd_sbp
, int channel
)
3845 emlxs_hba_t
*hba
= HBA
;
3846 emlxs_config_t
*cfg
= &CFG
;
3848 stmf_data_buf_t
*dbuf
;
3849 scsi_task_t
*fct_task
;
3857 ULP_SGE64 stage_sge
;
3865 ddi_dma_cookie_t
*cp_cmd
;
3867 pkt
= PRIV2PKT(cmd_sbp
);
3869 cp
= (CHANNEL
*)cmd_sbp
->channel
;
3871 iocbq
= &cmd_sbp
->iocbq
;
3872 iocb
= &iocbq
->iocb
;
3875 if (iocb
->ULPCOMMAND
== CMD_ABORT_XRI_CX
) {
3877 ndlp
= cmd_sbp
->node
;
3878 rpip
= EMLXS_NODE_TO_RPI(port
, ndlp
);
3881 /* Use the fabric rpi */
3882 rpip
= port
->vpip
->fabric_rpip
;
3885 /* Next allocate an Exchange for this command */
3886 xrip
= emlxs_sli4_alloc_xri(port
, cmd_sbp
, rpip
,
3887 EMLXS_XRI_SOL_BLS_TYPE
);
3890 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
3891 "Adapter Busy. Unable to allocate exchange. "
3894 return (FC_TRAN_BUSY
);
3897 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
3898 "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
3899 xrip
->XRI
, xrip
->iotag
, cmd_sbp
, pkt
->pkt_cmd_fhdr
.rx_id
);
3901 cmd_sbp
->xrip
= xrip
;
3905 /* Initalize iocbq */
3906 iocbq
->port
= (void *)port
;
3907 iocbq
->node
= (void *)ndlp
;
3908 iocbq
->channel
= (void *)cp
;
3911 * Don't give the abort priority, we want the IOCB
3912 * we are aborting to be processed first.
3914 iocbq
->flag
|= IOCB_SPECIAL
;
3917 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
3920 wqe
->un
.Abort
.Criteria
= ABORT_XRI_TAG
;
3921 wqe
->RequestTag
= xrip
->iotag
;
3922 wqe
->AbortTag
= pkt
->pkt_cmd_fhdr
.rx_id
;
3923 wqe
->Command
= CMD_ABORT_XRI_CX
;
3924 wqe
->Class
= CLASS3
;
3926 wqe
->CmdType
= WQE_TYPE_ABORT
;
3928 if (hba
->state
>= FC_LINK_UP
) {
3929 wqe
->un
.Abort
.IA
= 0;
3931 wqe
->un
.Abort
.IA
= 1;
3934 /* Set the pkt timer */
3935 cmd_sbp
->ticks
= hba
->timer_tics
+ pkt
->pkt_timeout
+
3936 ((pkt
->pkt_timeout
> 0xff) ? 0 : 10);
3938 return (IOERR_SUCCESS
);
3940 } else if (iocb
->ULPCOMMAND
== CMD_FCP_TRSP64_CX
) {
3942 timeout
= pkt
->pkt_timeout
;
3943 ndlp
= cmd_sbp
->node
;
3945 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
3946 "Unable to find rpi. did=0x%x", did
);
3948 emlxs_set_pkt_state(cmd_sbp
, IOSTAT_LOCAL_REJECT
,
3949 IOERR_INVALID_RPI
, 0);
3955 /* Initalize iocbq */
3956 iocbq
->port
= (void *)port
;
3957 iocbq
->node
= (void *)ndlp
;
3958 iocbq
->channel
= (void *)cp
;
3961 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
3963 xrip
= emlxs_sli4_register_xri(port
, cmd_sbp
,
3964 pkt
->pkt_cmd_fhdr
.rx_id
, did
);
3967 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
3968 "Unable to register xri %x. did=0x%x",
3969 pkt
->pkt_cmd_fhdr
.rx_id
, did
);
3971 emlxs_set_pkt_state(cmd_sbp
, IOSTAT_LOCAL_REJECT
,
3976 cmd_sbp
->iotag
= xrip
->iotag
;
3977 cmd_sbp
->channel
= cp
;
3979 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3980 cp_cmd
= pkt
->pkt_cmd_cookie
;
3982 cp_cmd
= &pkt
->pkt_cmd_cookie
;
3983 #endif /* >= EMLXS_MODREV3 */
3985 sge_size
= pkt
->pkt_cmdlen
;
3986 /* Make size a multiple of 4 */
3988 sge_size
= (sge_size
+ 3) & 0xfffffffc;
3990 sge_addr
= cp_cmd
->dmac_laddress
;
3991 sge
= xrip
->SGList
.virt
;
3993 stage_sge
.addrHigh
= PADDR_HI(sge_addr
);
3994 stage_sge
.addrLow
= PADDR_LO(sge_addr
);
3995 stage_sge
.length
= sge_size
;
3996 stage_sge
.offset
= 0;
4000 /* Copy staged SGE into SGL */
4001 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
,
4002 (uint8_t *)sge
, sizeof (ULP_SGE64
));
4005 wqe
->un
.FcpCmd
.Payload
.addrHigh
= stage_sge
.addrHigh
;
4006 wqe
->un
.FcpCmd
.Payload
.addrLow
= stage_sge
.addrLow
;
4007 wqe
->un
.FcpCmd
.Payload
.tus
.f
.bdeSize
= sge_size
;
4008 wqe
->un
.FcpCmd
.PayloadLength
= sge_size
;
4011 wqe
->ContextTag
= ndlp
->nlp_Rpi
;
4012 wqe
->XRITag
= xrip
->XRI
;
4015 wqe
->Command
= iocb
->ULPCOMMAND
;
4016 wqe
->Class
= cmd_sbp
->class;
4017 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4018 wqe
->Timer
= ((timeout
> 0xff) ? 0 : timeout
);
4024 wqe
->RequestTag
= xrip
->iotag
;
4025 wqe
->OXId
= (uint16_t)xrip
->rx_id
;
4028 if (xrip
->flag
& EMLXS_XRI_BUSY
) {
4032 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4034 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4038 wqe
->CmdType
= WQE_TYPE_TRSP
;
4039 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
4041 /* Set the pkt timer */
4042 cmd_sbp
->ticks
= hba
->timer_tics
+ timeout
+
4043 ((timeout
> 0xff) ? 0 : 10);
4045 if (pkt
->pkt_cmdlen
) {
4046 EMLXS_MPDATA_SYNC(pkt
->pkt_cmd_dma
, 0, pkt
->pkt_cmdlen
,
4047 DDI_DMA_SYNC_FORDEV
);
4050 return (IOERR_SUCCESS
);
4053 fct_cmd
= cmd_sbp
->fct_cmd
;
4054 did
= fct_cmd
->cmd_rportid
;
4055 dbuf
= cmd_sbp
->fct_buf
;
4056 fct_task
= (scsi_task_t
*)fct_cmd
->cmd_specific
;
4057 ndlp
= *(emlxs_node_t
**)fct_cmd
->cmd_rp
->rp_fca_private
;
4059 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4060 "Unable to find rpi. did=0x%x", did
);
4062 emlxs_set_pkt_state(cmd_sbp
, IOSTAT_LOCAL_REJECT
,
4063 IOERR_INVALID_RPI
, 0);
4068 /* Initalize iocbq */
4069 iocbq
->port
= (void *) port
;
4070 iocbq
->node
= (void *)ndlp
;
4071 iocbq
->channel
= (void *) cp
;
4074 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
4076 xrip
= cmd_sbp
->xrip
;
4078 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4079 "Unable to find xri. did=0x%x", did
);
4081 emlxs_set_pkt_state(cmd_sbp
, IOSTAT_LOCAL_REJECT
,
4086 if (emlxs_sli4_register_xri(port
, cmd_sbp
,
4087 xrip
->XRI
, ndlp
->nlp_DID
) == NULL
) {
4088 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4089 "Unable to register xri. did=0x%x", did
);
4091 emlxs_set_pkt_state(cmd_sbp
, IOSTAT_LOCAL_REJECT
,
4095 cmd_sbp
->iotag
= xrip
->iotag
;
4096 cmd_sbp
->channel
= cp
;
4098 if (cfg
[CFG_TIMEOUT_ENABLE
].current
) {
4100 ((2 * hba
->fc_ratov
) < 60) ? 60 : (2 * hba
->fc_ratov
);
4102 timeout
= 0x80000000;
4105 hba
->timer_tics
+ timeout
+ ((timeout
> 0xff) ? 0 : 10);
4109 if (fct_task
->task_flags
& TF_WRITE_DATA
) {
4110 iocb
->ULPCOMMAND
= CMD_FCP_TRECEIVE64_CX
;
4111 wqe
->CmdType
= WQE_TYPE_TRECEIVE
; /* Word 11 */
4113 } else { /* TF_READ_DATA */
4115 iocb
->ULPCOMMAND
= CMD_FCP_TSEND64_CX
;
4116 wqe
->CmdType
= WQE_TYPE_TSEND
; /* Word 11 */
4118 if ((dbuf
->db_data_size
>=
4119 fct_task
->task_expected_xfer_length
)) {
4120 /* enable auto-rsp AP feature */
4122 iocb
->ULPCT
= 0x1; /* for cmpl */
4126 (void) emlxs_sli4_fct_bde_setup(port
, cmd_sbp
);
4129 wqe
->ContextTag
= ndlp
->nlp_Rpi
;
4130 wqe
->XRITag
= xrip
->XRI
;
4133 wqe
->Command
= iocb
->ULPCOMMAND
;
4134 wqe
->Class
= cmd_sbp
->class;
4135 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4136 wqe
->Timer
= ((timeout
> 0xff) ? 0 : timeout
);
4143 wqe
->RequestTag
= xrip
->iotag
;
4144 wqe
->OXId
= (uint16_t)fct_cmd
->cmd_oxid
;
4147 if (xrip
->flag
& EMLXS_XRI_BUSY
) {
4151 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4153 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4157 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
4160 wqe
->CmdSpecific
= dbuf
->db_data_size
;
4162 return (IOERR_SUCCESS
);
4164 } /* emlxs_sli4_prep_fct_iocb() */
4165 #endif /* SFCT_SUPPORT */
4170 emlxs_sli4_prep_fcp_iocb(emlxs_port_t
*port
, emlxs_buf_t
*sbp
, int channel
)
4172 emlxs_hba_t
*hba
= HBA
;
4185 pkt
= PRIV2PKT(sbp
);
4186 did
= LE_SWAP24_LO(pkt
->pkt_cmd_fhdr
.d_id
);
4187 cp
= &hba
->chan
[channel
];
4189 iocbq
= &sbp
->iocbq
;
4190 iocbq
->channel
= (void *) cp
;
4191 iocbq
->port
= (void *) port
;
4194 iocb
= &iocbq
->iocb
;
4195 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
4196 bzero((void *)iocb
, sizeof (IOCB
));
4198 /* Find target node object */
4199 node
= (NODELIST
*)iocbq
->node
;
4200 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
4203 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4204 "Unable to find rpi. did=0x%x", did
);
4206 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4207 IOERR_INVALID_RPI
, 0);
4212 /* Next allocate an Exchange for this command */
4213 xrip
= emlxs_sli4_alloc_xri(port
, sbp
, rpip
,
4214 EMLXS_XRI_SOL_FCP_TYPE
);
4217 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4218 "Adapter Busy. Unable to allocate exchange. did=0x%x", did
);
4220 return (FC_TRAN_BUSY
);
4225 #ifdef DEBUG_FASTPATH
4226 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4227 "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4228 xrip
->XRI
, xrip
->iotag
, xrip
->rx_id
, rpip
->RPI
);
4229 #endif /* DEBUG_FASTPATH */
4231 /* Indicate this is a FCP cmd */
4232 iocbq
->flag
|= IOCB_FCP_CMD
;
4234 if (emlxs_sli4_bde_setup(port
, sbp
)) {
4235 emlxs_sli4_free_xri(port
, sbp
, xrip
, 1);
4236 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4237 "Adapter Busy. Unable to setup SGE. did=0x%x", did
);
4239 return (FC_TRAN_BUSY
);
4244 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4245 "FCP: SGLaddr virt %p phys %p size %d", xrip
->SGList
.virt
,
4246 xrip
->SGList
.phys
, pkt
->pkt_datalen
);
4247 emlxs_data_dump(port
, "FCP: SGL", (uint32_t *)xrip
->SGList
.virt
, 20, 0);
4248 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4249 "FCP: CMD virt %p len %d:%d:%d",
4250 pkt
->pkt_cmd
, pkt
->pkt_cmdlen
, pkt
->pkt_rsplen
, pkt
->pkt_datalen
);
4251 emlxs_data_dump(port
, "FCP: CMD", (uint32_t *)pkt
->pkt_cmd
, 10, 0);
4252 #endif /* DEBUG_FCP */
4254 offset
= (off_t
)((uint64_t)((unsigned long)
4255 xrip
->SGList
.virt
) -
4256 (uint64_t)((unsigned long)
4257 hba
->sli
.sli4
.slim2
.virt
));
4259 EMLXS_MPDATA_SYNC(xrip
->SGList
.dma_handle
, offset
,
4260 xrip
->SGList
.size
, DDI_DMA_SYNC_FORDEV
);
4262 /* if device is FCP-2 device, set the following bit */
4263 /* that says to run the FC-TAPE protocol. */
4264 if (node
->nlp_fcp_info
& NLP_FCP_2_DEVICE
) {
4268 if (pkt
->pkt_datalen
== 0) {
4269 iocb
->ULPCOMMAND
= CMD_FCP_ICMND64_CR
;
4270 wqe
->Command
= CMD_FCP_ICMND64_CR
;
4271 wqe
->CmdType
= WQE_TYPE_FCP_DATA_IN
;
4272 } else if (pkt
->pkt_tran_type
== FC_PKT_FCP_READ
) {
4273 iocb
->ULPCOMMAND
= CMD_FCP_IREAD64_CR
;
4274 wqe
->Command
= CMD_FCP_IREAD64_CR
;
4275 wqe
->CmdType
= WQE_TYPE_FCP_DATA_IN
;
4276 wqe
->PU
= PARM_XFER_CHECK
;
4278 iocb
->ULPCOMMAND
= CMD_FCP_IWRITE64_CR
;
4279 wqe
->Command
= CMD_FCP_IWRITE64_CR
;
4280 wqe
->CmdType
= WQE_TYPE_FCP_DATA_OUT
;
4282 wqe
->un
.FcpCmd
.TotalTransferCount
= pkt
->pkt_datalen
;
4284 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4285 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4287 wqe
->ContextTag
= rpip
->RPI
;
4288 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4289 wqe
->XRITag
= xrip
->XRI
;
4291 ((pkt
->pkt_timeout
> 0xff) ? 0 : pkt
->pkt_timeout
);
4293 if (pkt
->pkt_cmd_fhdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
4295 wqe
->CCP
= pkt
->pkt_cmd_fhdr
.rsvd
;
4298 switch (FC_TRAN_CLASS(pkt
->pkt_tran_flags
)) {
4299 case FC_TRAN_CLASS2
:
4300 wqe
->Class
= CLASS2
;
4302 case FC_TRAN_CLASS3
:
4304 wqe
->Class
= CLASS3
;
4307 sbp
->class = wqe
->Class
;
4308 wqe
->RequestTag
= iotag
;
4309 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
4311 return (FC_SUCCESS
);
4312 } /* emlxs_sli4_prep_fcp_iocb() */
/*
 * emlxs_sli4_prep_ip_iocb() - Prepare an IP-over-FC transmit IOCB (SLI-4).
 *
 * NOTE(review): extraction artifact - the original file's line numbers
 * (e.g. "4317") are fused into the text below, tokens are split across
 * lines, and the return-type line and braces are elided from this view.
 *
 * The only visible statement unconditionally returns FC_TRAN_BUSY, i.e.
 * IP/network IOCB transmit is not serviced by this SLI-4 entry point;
 * callers receive a "transport busy" rejection for every request.
 * (Parameters port and sbp are therefore unused in the visible body.)
 */
4317 emlxs_sli4_prep_ip_iocb(emlxs_port_t
*port
, emlxs_buf_t
*sbp
)
/* Unconditional rejection - no WQE is ever built for IP traffic here. */
4319 return (FC_TRAN_BUSY
);
4321 } /* emlxs_sli4_prep_ip_iocb() */
4326 emlxs_sli4_prep_els_iocb(emlxs_port_t
*port
, emlxs_buf_t
*sbp
)
4328 emlxs_hba_t
*hba
= HBA
;
4334 RPIobj_t
*reserved_rpip
= NULL
;
4335 RPIobj_t
*rpip
= NULL
;
4340 ULP_SGE64 stage_sge
;
4342 ddi_dma_cookie_t
*cp_cmd
;
4343 ddi_dma_cookie_t
*cp_resp
;
4347 pkt
= PRIV2PKT(sbp
);
4348 did
= LE_SWAP24_LO(pkt
->pkt_cmd_fhdr
.d_id
);
4350 iocbq
= &sbp
->iocbq
;
4352 iocb
= &iocbq
->iocb
;
4353 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
4354 bzero((void *)iocb
, sizeof (IOCB
));
4355 cp
= &hba
->chan
[hba
->channel_els
];
4357 /* Initalize iocbq */
4358 iocbq
->port
= (void *) port
;
4359 iocbq
->channel
= (void *) cp
;
4364 #if (EMLXS_MODREV >= EMLXS_MODREV3)
4365 cp_cmd
= pkt
->pkt_cmd_cookie
;
4366 cp_resp
= pkt
->pkt_resp_cookie
;
4368 cp_cmd
= &pkt
->pkt_cmd_cookie
;
4369 cp_resp
= &pkt
->pkt_resp_cookie
;
4370 #endif /* >= EMLXS_MODREV3 */
4374 sge
->addrHigh
= PADDR_HI(cp_cmd
->dmac_laddress
);
4375 sge
->addrLow
= PADDR_LO(cp_cmd
->dmac_laddress
);
4376 sge
->length
= pkt
->pkt_cmdlen
;
4380 cmd
= *((uint32_t *)pkt
->pkt_cmd
);
4381 cmd
&= ELS_CMD_MASK
;
4383 /* Initalize iocb */
4384 if (pkt
->pkt_tran_type
== FC_PKT_OUTBOUND
) {
4388 xrip
= emlxs_sli4_register_xri(port
, sbp
,
4389 pkt
->pkt_cmd_fhdr
.rx_id
, did
);
4392 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4393 "Unable to find XRI. rxid=%x",
4394 pkt
->pkt_cmd_fhdr
.rx_id
);
4396 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4404 /* This means that we had a node registered */
4405 /* when the unsol request came in but the node */
4406 /* has since been unregistered. */
4407 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4408 "Unable to find RPI. rxid=%x",
4409 pkt
->pkt_cmd_fhdr
.rx_id
);
4411 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4412 IOERR_INVALID_RPI
, 0);
4416 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4417 "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4418 xrip
->XRI
, xrip
->iotag
, xrip
->rx_id
, rpip
->RPI
);
4420 iocb
->ULPCOMMAND
= CMD_XMIT_ELS_RSP64_CX
;
4421 wqe
->Command
= CMD_XMIT_ELS_RSP64_CX
;
4422 wqe
->CmdType
= WQE_TYPE_GEN
;
4423 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4424 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4427 wqe
->un
.ElsRsp
.Payload
.addrHigh
= sge
->addrHigh
;
4428 wqe
->un
.ElsRsp
.Payload
.addrLow
= sge
->addrLow
;
4429 wqe
->un
.ElsRsp
.Payload
.tus
.f
.bdeSize
= pkt
->pkt_cmdlen
;
4430 wqe
->un
.ElsCmd
.PayloadLength
= pkt
->pkt_cmdlen
;
4432 wqe
->un
.ElsRsp
.RemoteId
= did
;
4434 wqe
->OXId
= xrip
->rx_id
;
4437 /* Now sge is fully staged */
4439 sge
= xrip
->SGList
.virt
;
4440 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
, (uint8_t *)sge
,
4441 sizeof (ULP_SGE64
));
4443 if (rpip
->RPI
== FABRIC_RPI
) {
4444 wqe
->ContextTag
= port
->vpip
->VPI
;
4445 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4447 wqe
->ContextTag
= rpip
->RPI
;
4448 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4451 if ((cmd
== ELS_CMD_ACC
) && (sbp
->ucmd
== ELS_CMD_FLOGI
)) {
4452 wqe
->un
.ElsCmd
.SP
= 1;
4453 wqe
->un
.ElsCmd
.LocalId
= 0xFFFFFE;
4459 fcfp
= port
->vpip
->vfip
->fcfp
;
4460 node
= (emlxs_node_t
*)iocbq
->node
;
4461 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
4464 /* Use the fabric rpi */
4465 rpip
= port
->vpip
->fabric_rpip
;
4468 /* Next allocate an Exchange for this command */
4469 xrip
= emlxs_sli4_alloc_xri(port
, sbp
, rpip
,
4470 EMLXS_XRI_SOL_ELS_TYPE
);
4473 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4474 "Adapter Busy. Unable to allocate exchange. "
4477 return (FC_TRAN_BUSY
);
4480 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4481 "ELS: Prep xri=%d iotag=%d rpi=%d",
4482 xrip
->XRI
, xrip
->iotag
, rpip
->RPI
);
4484 iocb
->ULPCOMMAND
= CMD_ELS_REQUEST64_CR
;
4485 wqe
->Command
= CMD_ELS_REQUEST64_CR
;
4486 wqe
->CmdType
= WQE_TYPE_ELS
;
4487 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4488 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4491 wqe
->un
.ElsCmd
.Payload
.addrHigh
= sge
->addrHigh
;
4492 wqe
->un
.ElsCmd
.Payload
.addrLow
= sge
->addrLow
;
4493 wqe
->un
.ElsCmd
.Payload
.tus
.f
.bdeSize
= pkt
->pkt_cmdlen
;
4495 wqe
->un
.ElsCmd
.RemoteId
= did
;
4496 wqe
->Timer
= ((pkt
->pkt_timeout
> 0xff) ? 0 : pkt
->pkt_timeout
);
4499 iocb
->un
.elsreq64
.remoteID
= (did
== BCAST_DID
) ? 0 : did
;
4500 iocb
->ULPPU
= 1; /* Wd4 is relative offset */
4504 sge
= xrip
->SGList
.virt
;
4505 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
, (uint8_t *)sge
,
4506 sizeof (ULP_SGE64
));
4508 wqe
->un
.ElsCmd
.PayloadLength
=
4509 pkt
->pkt_cmdlen
; /* Byte offset of rsp data */
4513 sge
->addrHigh
= PADDR_HI(cp_resp
->dmac_laddress
);
4514 sge
->addrLow
= PADDR_LO(cp_resp
->dmac_laddress
);
4515 sge
->length
= pkt
->pkt_rsplen
;
4518 /* Now sge is fully staged */
4520 sge
= xrip
->SGList
.virt
;
4522 BE_SWAP32_BCOPY((uint8_t *)&stage_sge
, (uint8_t *)sge
,
4523 sizeof (ULP_SGE64
));
4525 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4526 "ELS: SGLaddr virt %p phys %p",
4527 xrip
->SGList
.virt
, xrip
->SGList
.phys
);
4528 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4529 "ELS: PAYLOAD virt %p phys %p",
4530 pkt
->pkt_cmd
, cp_cmd
->dmac_laddress
);
4531 emlxs_data_dump(port
, "ELS: SGL", (uint32_t *)xrip
->SGList
.virt
,
4533 #endif /* DEBUG_ELS */
4537 wqe
->un
.ElsCmd
.SP
= 1;
4539 if ((hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) ==
4540 SLI_INTF_IF_TYPE_0
) {
4541 wqe
->ContextTag
= fcfp
->FCFI
;
4542 wqe
->ContextType
= WQE_FCFI_CONTEXT
;
4544 wqe
->ContextTag
= port
->vpip
->VPI
;
4545 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4548 if (hba
->flag
& FC_FIP_SUPPORTED
) {
4549 wqe
->CmdType
|= WQE_TYPE_MASK_FIP
;
4552 if (hba
->topology
== TOPOLOGY_LOOP
) {
4553 wqe
->un
.ElsCmd
.LocalId
= port
->did
;
4556 wqe
->ELSId
= WQE_ELSID_FLOGI
;
4559 wqe
->un
.ElsCmd
.SP
= 1;
4560 wqe
->ContextTag
= port
->vpip
->VPI
;
4561 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4563 if (hba
->flag
& FC_FIP_SUPPORTED
) {
4564 wqe
->CmdType
|= WQE_TYPE_MASK_FIP
;
4567 wqe
->ELSId
= WQE_ELSID_FDISC
;
4570 if ((did
== FABRIC_DID
) &&
4571 (hba
->flag
& FC_FIP_SUPPORTED
)) {
4572 wqe
->CmdType
|= WQE_TYPE_MASK_FIP
;
4575 wqe
->ContextTag
= port
->vpip
->VPI
;
4576 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4577 wqe
->ELSId
= WQE_ELSID_LOGO
;
4580 if (rpip
->RPI
== FABRIC_RPI
) {
4581 if (hba
->flag
& FC_PT_TO_PT
) {
4582 wqe
->un
.ElsCmd
.SP
= 1;
4583 wqe
->un
.ElsCmd
.LocalId
= port
->did
;
4586 wqe
->ContextTag
= port
->vpip
->VPI
;
4587 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4589 wqe
->ContextTag
= rpip
->RPI
;
4590 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4593 wqe
->ELSId
= WQE_ELSID_PLOGI
;
4596 if (rpip
->RPI
== FABRIC_RPI
) {
4597 wqe
->ContextTag
= port
->vpip
->VPI
;
4598 wqe
->ContextType
= WQE_VPI_CONTEXT
;
4600 wqe
->ContextTag
= rpip
->RPI
;
4601 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4604 wqe
->ELSId
= WQE_ELSID_CMD
;
4609 /* This allows fct to abort the request */
4611 sbp
->fct_cmd
->cmd_oxid
= xrip
->XRI
;
4612 sbp
->fct_cmd
->cmd_rxid
= 0xFFFF;
4614 #endif /* SFCT_SUPPORT */
4617 if (wqe
->ContextType
== WQE_VPI_CONTEXT
) {
4618 reserved_rpip
= emlxs_rpi_reserve_notify(port
, did
, xrip
);
4620 if (!reserved_rpip
) {
4621 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4622 "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
4623 pkt
->pkt_cmd_fhdr
.rx_id
);
4625 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4626 IOERR_INVALID_RPI
, 0);
4630 /* Store the reserved rpi */
4631 if (wqe
->Command
== CMD_ELS_REQUEST64_CR
) {
4632 wqe
->OXId
= reserved_rpip
->RPI
;
4634 wqe
->CmdSpecific
= reserved_rpip
->RPI
;
4638 offset
= (off_t
)((uint64_t)((unsigned long)
4639 xrip
->SGList
.virt
) -
4640 (uint64_t)((unsigned long)
4641 hba
->sli
.sli4
.slim2
.virt
));
4643 EMLXS_MPDATA_SYNC(xrip
->SGList
.dma_handle
, offset
,
4644 xrip
->SGList
.size
, DDI_DMA_SYNC_FORDEV
);
4646 if (pkt
->pkt_cmd_fhdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
4648 wqe
->CCP
= pkt
->pkt_cmd_fhdr
.rsvd
;
4651 switch (FC_TRAN_CLASS(pkt
->pkt_tran_flags
)) {
4652 case FC_TRAN_CLASS2
:
4653 wqe
->Class
= CLASS2
;
4655 case FC_TRAN_CLASS3
:
4657 wqe
->Class
= CLASS3
;
4660 sbp
->class = wqe
->Class
;
4661 wqe
->XRITag
= xrip
->XRI
;
4662 wqe
->RequestTag
= xrip
->iotag
;
4663 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
4664 return (FC_SUCCESS
);
4666 } /* emlxs_sli4_prep_els_iocb() */
4671 emlxs_sli4_prep_ct_iocb(emlxs_port_t
*port
, emlxs_buf_t
*sbp
)
4673 emlxs_hba_t
*hba
= HBA
;
4678 NODELIST
*node
= NULL
;
4685 pkt
= PRIV2PKT(sbp
);
4686 did
= LE_SWAP24_LO(pkt
->pkt_cmd_fhdr
.d_id
);
4688 iocbq
= &sbp
->iocbq
;
4690 iocb
= &iocbq
->iocb
;
4691 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
4692 bzero((void *)iocb
, sizeof (IOCB
));
4694 cp
= &hba
->chan
[hba
->channel_ct
];
4696 iocbq
->port
= (void *) port
;
4697 iocbq
->channel
= (void *) cp
;
4703 if (pkt
->pkt_tran_type
== FC_PKT_OUTBOUND
) {
4707 xrip
= emlxs_sli4_register_xri(port
, sbp
,
4708 pkt
->pkt_cmd_fhdr
.rx_id
, did
);
4711 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4712 "Unable to find XRI. rxid=%x",
4713 pkt
->pkt_cmd_fhdr
.rx_id
);
4715 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4723 /* This means that we had a node registered */
4724 /* when the unsol request came in but the node */
4725 /* has since been unregistered. */
4726 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4727 "Unable to find RPI. rxid=%x",
4728 pkt
->pkt_cmd_fhdr
.rx_id
);
4730 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4731 IOERR_INVALID_RPI
, 0);
4735 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4736 "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4737 xrip
->XRI
, xrip
->iotag
, xrip
->rx_id
, rpip
->RPI
);
4739 if (emlxs_sli4_bde_setup(port
, sbp
)) {
4740 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4741 "Adapter Busy. Unable to setup SGE. did=0x%x", did
);
4743 return (FC_TRAN_BUSY
);
4746 if (!(hba
->model_info
.chip
& EMLXS_BE_CHIPS
)) {
4747 wqe
->un
.XmitSeq
.Rsvd0
= 0; /* Word3 now reserved */
4750 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4751 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4754 iocb
->ULPCOMMAND
= CMD_XMIT_SEQUENCE64_CR
;
4755 wqe
->CmdType
= WQE_TYPE_GEN
;
4756 wqe
->Command
= CMD_XMIT_SEQUENCE64_CR
;
4759 if (((SLI_CT_REQUEST
*) pkt
->pkt_cmd
)->CommandResponse
.bits
.
4760 CmdRsp
== (LE_SWAP16(SLI_CT_LOOPBACK
))) {
4761 wqe
->un
.XmitSeq
.xo
= 1;
4763 wqe
->un
.XmitSeq
.xo
= 0;
4766 if (pkt
->pkt_cmd_fhdr
.f_ctl
& F_CTL_LAST_SEQ
) {
4767 wqe
->un
.XmitSeq
.ls
= 1;
4770 if (pkt
->pkt_cmd_fhdr
.f_ctl
& F_CTL_SEQ_INITIATIVE
) {
4771 wqe
->un
.XmitSeq
.si
= 1;
4774 wqe
->un
.XmitSeq
.DFctl
= pkt
->pkt_cmd_fhdr
.df_ctl
;
4775 wqe
->un
.XmitSeq
.Rctl
= pkt
->pkt_cmd_fhdr
.r_ctl
;
4776 wqe
->un
.XmitSeq
.Type
= pkt
->pkt_cmd_fhdr
.type
;
4777 wqe
->OXId
= xrip
->rx_id
;
4778 wqe
->XC
= 0; /* xri_tag is a new exchange */
4779 wqe
->CmdSpecific
= wqe
->un
.GenReq
.Payload
.tus
.f
.bdeSize
;
4784 node
= (emlxs_node_t
*)iocbq
->node
;
4785 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
4788 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_failed_msg
,
4789 "Unable to find rpi. did=0x%x rpi=%d",
4790 did
, node
->nlp_Rpi
);
4792 emlxs_set_pkt_state(sbp
, IOSTAT_LOCAL_REJECT
,
4793 IOERR_INVALID_RPI
, 0);
4797 /* Next allocate an Exchange for this command */
4798 xrip
= emlxs_sli4_alloc_xri(port
, sbp
, rpip
,
4799 EMLXS_XRI_SOL_CT_TYPE
);
4802 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4803 "Adapter Busy. Unable to allocate exchange. "
4806 return (FC_TRAN_BUSY
);
4809 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4810 "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4811 xrip
->XRI
, xrip
->iotag
, xrip
->rx_id
, rpip
->RPI
);
4813 if (emlxs_sli4_bde_setup(port
, sbp
)) {
4814 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
4815 "Adapter Busy. Unable to setup SGE. did=0x%x", did
);
4817 emlxs_sli4_free_xri(port
, sbp
, xrip
, 1);
4818 return (FC_TRAN_BUSY
);
4821 if (!(hba
->sli
.sli4
.param
.PHWQ
)) {
4822 wqe
->DBDE
= 1; /* Data type for BDE 0 */
4825 iocb
->ULPCOMMAND
= CMD_GEN_REQUEST64_CR
;
4826 wqe
->CmdType
= WQE_TYPE_GEN
;
4827 wqe
->Command
= CMD_GEN_REQUEST64_CR
;
4828 wqe
->un
.GenReq
.la
= 1;
4829 wqe
->un
.GenReq
.DFctl
= pkt
->pkt_cmd_fhdr
.df_ctl
;
4830 wqe
->un
.GenReq
.Rctl
= pkt
->pkt_cmd_fhdr
.r_ctl
;
4831 wqe
->un
.GenReq
.Type
= pkt
->pkt_cmd_fhdr
.type
;
4834 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4835 "CT: SGLaddr virt %p phys %p", xrip
->SGList
.virt
,
4837 emlxs_data_dump(port
, "CT: SGL", (uint32_t *)xrip
->SGList
.virt
,
4839 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4840 "CT: CMD virt %p len %d:%d",
4841 pkt
->pkt_cmd
, pkt
->pkt_cmdlen
, pkt
->pkt_rsplen
);
4842 emlxs_data_dump(port
, "CT: DATA", (uint32_t *)pkt
->pkt_cmd
,
4844 #endif /* DEBUG_CT */
4847 /* This allows fct to abort the request */
4849 sbp
->fct_cmd
->cmd_oxid
= xrip
->XRI
;
4850 sbp
->fct_cmd
->cmd_rxid
= 0xFFFF;
4852 #endif /* SFCT_SUPPORT */
4856 iocb
->un
.genreq64
.w5
.hcsw
.Rctl
= pkt
->pkt_cmd_fhdr
.r_ctl
;
4857 iocb
->un
.genreq64
.w5
.hcsw
.Type
= pkt
->pkt_cmd_fhdr
.type
;
4858 iocb
->un
.genreq64
.w5
.hcsw
.Dfctl
= pkt
->pkt_cmd_fhdr
.df_ctl
;
4859 iocb
->ULPPU
= 1; /* Wd4 is relative offset */
4861 offset
= (off_t
)((uint64_t)((unsigned long)
4862 xrip
->SGList
.virt
) -
4863 (uint64_t)((unsigned long)
4864 hba
->sli
.sli4
.slim2
.virt
));
4866 EMLXS_MPDATA_SYNC(xrip
->SGList
.dma_handle
, offset
,
4867 xrip
->SGList
.size
, DDI_DMA_SYNC_FORDEV
);
4869 wqe
->ContextTag
= rpip
->RPI
;
4870 wqe
->ContextType
= WQE_RPI_CONTEXT
;
4871 wqe
->XRITag
= xrip
->XRI
;
4872 wqe
->Timer
= ((pkt
->pkt_timeout
> 0xff) ? 0 : pkt
->pkt_timeout
);
4874 if (pkt
->pkt_cmd_fhdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
4876 wqe
->CCP
= pkt
->pkt_cmd_fhdr
.rsvd
;
4879 switch (FC_TRAN_CLASS(pkt
->pkt_tran_flags
)) {
4880 case FC_TRAN_CLASS2
:
4881 wqe
->Class
= CLASS2
;
4883 case FC_TRAN_CLASS3
:
4885 wqe
->Class
= CLASS3
;
4888 sbp
->class = wqe
->Class
;
4889 wqe
->RequestTag
= xrip
->iotag
;
4890 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
4891 return (FC_SUCCESS
);
4893 } /* emlxs_sli4_prep_ct_iocb() */
/*
 * emlxs_sli4_read_eq() - Peek at the current Event Queue entry for one EQ.
 *
 * NOTE(review): extraction artifact - original line numbers are fused into
 * the text, tokens are split across lines, and several interior lines
 * (local declarations, braces, the return statements) are elided from this
 * view. The visible statement sequence is annotated as-is.
 *
 * Visible flow: under EMLXS_PORT_LOCK, index into the EQ ring at
 * eq->host_index, DMA-sync the ring memory for kernel access, byte-swap
 * the EQE word from wire (big-endian) order, and test its VALID bit.
 */
4898 emlxs_sli4_read_eq(emlxs_hba_t
*hba
, EQ_DESC_t
*eq
)
/* Serialize EQ inspection against other port-level state changes. */
4905 mutex_enter(&EMLXS_PORT_LOCK
);
/* Point at the next-to-consume entry: ring base + host consumer index. */
4907 ptr
= eq
->addr
.virt
;
4908 ptr
+= eq
->host_index
;
/*
 * Compute the EQ ring's byte offset relative to the slim2 DMA area so the
 * sync below targets the correct window of the shared mapping.
 * (NOTE(review): the term referencing the EQ virtual address on original
 * line 4911 is elided from this view.)
 */
4910 offset
= (off_t
)((uint64_t)((unsigned long)
4912 (uint64_t)((unsigned long)
4913 hba
->sli
.sli4
.slim2
.virt
));
/* Make the device-written EQEs visible to the CPU before reading. */
4915 EMLXS_MPDATA_SYNC(eq
->addr
.dma_handle
, offset
,
4916 4096, DDI_DMA_SYNC_FORKERNEL
);
/* EQEs are big-endian on the wire; swap to host order before testing. */
4919 eqe
.word
= BE_SWAP32(eqe
.word
);
/* VALID bit set means the hardware has posted an event at this slot. */
4921 if (eqe
.word
& EQE_VALID
) {
4925 mutex_exit(&EMLXS_PORT_LOCK
);
4929 } /* emlxs_sli4_read_eq */
/*
 * emlxs_sli4_poll_intr() - Poll all Event Queues and service pending events.
 *
 * NOTE(review): extraction artifact - original line numbers are fused into
 * the text, tokens are split, and interior lines (locals, the rc test on
 * original lines ~4943-4951, braces) are elided from this view.
 *
 * Visible flow: for each configured interrupt vector, peek at its EQ via
 * emlxs_sli4_read_eq(), then invoke the MSI interrupt handler directly with
 * the vector's argument cookie - presumably only when the EQ peek reported
 * a valid pending event (the gating test is elided here; confirm against
 * the full source).
 */
4933 emlxs_sli4_poll_intr(emlxs_hba_t
*hba
)
/* Per-vector argument cookies, matching the 8 possible MSI vectors. */
4937 char arg
[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
4939 /* Check attention bits once and process if required */
/* One pass over every allocated interrupt vector / EQ pair. */
4941 for (i
= 0; i
< hba
->intr_count
; i
++) {
/* Non-destructively check whether EQ[i] has a valid event posted. */
4942 rc
= emlxs_sli4_read_eq(hba
, &hba
->sli
.sli4
.eq
[i
]);
/* Hand off to the normal MSI handler as if the vector had fired. */
4952 (void) emlxs_sli4_msi_intr((char *)hba
,
4953 (char *)(unsigned long)arg
[i
]);
4957 } /* emlxs_sli4_poll_intr() */
4962 emlxs_sli4_process_async_event(emlxs_hba_t
*hba
, CQE_ASYNC_t
*cqe
)
4964 emlxs_port_t
*port
= &PPORT
;
4967 /* Save the event tag */
4968 if (hba
->link_event_tag
== cqe
->un
.link
.event_tag
) {
4969 HBASTATS
.LinkMultiEvent
++;
4970 } else if (hba
->link_event_tag
+ 1 < cqe
->un
.link
.event_tag
) {
4971 HBASTATS
.LinkMultiEvent
++;
4973 hba
->link_event_tag
= cqe
->un
.link
.event_tag
;
4975 switch (cqe
->event_code
) {
4976 case ASYNC_EVENT_CODE_FCOE_LINK_STATE
:
4977 HBASTATS
.LinkEvent
++;
4979 switch (cqe
->un
.link
.link_status
) {
4980 case ASYNC_EVENT_PHYS_LINK_UP
:
4981 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4982 "Link Async Event: PHYS_LINK_UP. val=%d "
4984 cqe
->valid
, cqe
->event_type
, HBASTATS
.LinkEvent
);
4987 case ASYNC_EVENT_LOGICAL_LINK_UP
:
4988 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4989 "Link Async Event: LOGICAL_LINK_UP. val=%d "
4991 cqe
->valid
, cqe
->event_type
, HBASTATS
.LinkEvent
);
4993 emlxs_sli4_handle_fcoe_link_event(hba
, cqe
);
4996 case ASYNC_EVENT_PHYS_LINK_DOWN
:
4997 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
4998 "Link Async Event: PHYS_LINK_DOWN. val=%d "
5000 cqe
->valid
, cqe
->event_type
, HBASTATS
.LinkEvent
);
5002 emlxs_sli4_handle_fcoe_link_event(hba
, cqe
);
5005 case ASYNC_EVENT_LOGICAL_LINK_DOWN
:
5006 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5007 "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
5009 cqe
->valid
, cqe
->event_type
, HBASTATS
.LinkEvent
);
5011 emlxs_sli4_handle_fcoe_link_event(hba
, cqe
);
5014 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5015 "Link Async Event: Unknown link status=%d event=%x",
5016 cqe
->un
.link
.link_status
, HBASTATS
.LinkEvent
);
5020 case ASYNC_EVENT_CODE_FCOE_FIP
:
5021 switch (cqe
->un
.fcoe
.evt_type
) {
5022 case ASYNC_EVENT_NEW_FCF_DISC
:
5023 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5024 "FIP Async Event: FCF_FOUND %d:%d",
5025 cqe
->un
.fcoe
.ref_index
, cqe
->un
.fcoe
.fcf_count
);
5027 (void) emlxs_fcf_found_notify(port
,
5028 cqe
->un
.fcoe
.ref_index
);
5030 case ASYNC_EVENT_FCF_TABLE_FULL
:
5031 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5032 "FIP Async Event: FCFTAB_FULL %d:%d",
5033 cqe
->un
.fcoe
.ref_index
, cqe
->un
.fcoe
.fcf_count
);
5035 (void) emlxs_fcf_full_notify(port
);
5037 case ASYNC_EVENT_FCF_DEAD
:
5038 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5039 "FIP Async Event: FCF_LOST %d:%d",
5040 cqe
->un
.fcoe
.ref_index
, cqe
->un
.fcoe
.fcf_count
);
5042 (void) emlxs_fcf_lost_notify(port
,
5043 cqe
->un
.fcoe
.ref_index
);
5045 case ASYNC_EVENT_VIRT_LINK_CLEAR
:
5046 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5047 "FIP Async Event: CVL %d",
5048 cqe
->un
.fcoe
.ref_index
);
5050 (void) emlxs_fcf_cvl_notify(port
,
5051 emlxs_sli4_vpi_to_index(hba
,
5052 cqe
->un
.fcoe
.ref_index
));
5055 case ASYNC_EVENT_FCF_MODIFIED
:
5056 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5057 "FIP Async Event: FCF_CHANGED %d",
5058 cqe
->un
.fcoe
.ref_index
);
5060 (void) emlxs_fcf_changed_notify(port
,
5061 cqe
->un
.fcoe
.ref_index
);
5064 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5065 "FIP Async Event: Unknown event type=%d",
5066 cqe
->un
.fcoe
.evt_type
);
5070 case ASYNC_EVENT_CODE_DCBX
:
5071 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5072 "DCBX Async Event: type=%d. Not supported.",
5075 case ASYNC_EVENT_CODE_GRP_5
:
5076 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5077 "Group 5 Async Event: type=%d.", cqe
->event_type
);
5078 if (cqe
->event_type
== ASYNC_EVENT_QOS_SPEED
) {
5079 hba
->qos_linkspeed
= cqe
->un
.qos
.qos_link_speed
;
5082 case ASYNC_EVENT_CODE_FC_EVENT
:
5083 switch (cqe
->event_type
) {
5084 case ASYNC_EVENT_FC_LINK_ATT
:
5085 HBASTATS
.LinkEvent
++;
5087 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5088 "FC Async Event: Link Attention. event=%x",
5089 HBASTATS
.LinkEvent
);
5091 emlxs_sli4_handle_fc_link_att(hba
, cqe
);
5093 case ASYNC_EVENT_FC_SHARED_LINK_ATT
:
5094 HBASTATS
.LinkEvent
++;
5096 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5097 "FC Async Event: Shared Link Attention. event=%x",
5098 HBASTATS
.LinkEvent
);
5100 emlxs_sli4_handle_fc_link_att(hba
, cqe
);
5103 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5104 "FC Async Event: Unknown event. type=%d event=%x",
5105 cqe
->event_type
, HBASTATS
.LinkEvent
);
5108 case ASYNC_EVENT_CODE_PORT
:
5109 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5110 "SLI Port Async Event: type=%d", cqe
->event_type
);
5111 if (cqe
->event_type
== ASYNC_EVENT_MISCONFIG_PORT
) {
5112 *((uint32_t *)cqe
->un
.port
.link_status
) =
5113 BE_SWAP32(*((uint32_t *)cqe
->un
.port
.link_status
));
5115 cqe
->un
.port
.link_status
[hba
->sli
.sli4
.link_number
];
5122 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5123 "SLI Port Async Event: Physical media not "
5126 "^%s%d: Optics faulted/incorrectly "
5127 "installed/not installed - Reseat optics, "
5128 "if issue not resolved, replace.",
5129 DRIVER_NAME
, hba
->ddiinst
);
5133 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5134 "SLI Port Async Event: Wrong physical "
5137 "^%s%d: Optics of two types installed - "
5138 "Remove one optic or install matching"
5140 DRIVER_NAME
, hba
->ddiinst
);
5144 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5145 "SLI Port Async Event: Unsupported "
5146 "physical media detected");
5148 "^%s%d: Incompatible optics - Replace "
5149 "with compatible optics for card to "
5151 DRIVER_NAME
, hba
->ddiinst
);
5155 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5156 "SLI Port Async Event: Physical media "
5157 "error, status=%x", status
);
5159 "^%s%d: Misconfigured port: status=0x%x - "
5160 "Check optics on card.",
5161 DRIVER_NAME
, hba
->ddiinst
, status
);
5166 case ASYNC_EVENT_CODE_VF
:
5167 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5168 "VF Async Event: type=%d",
5171 case ASYNC_EVENT_CODE_MR
:
5172 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5173 "MR Async Event: type=%d",
5177 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5178 "Unknown Async Event: code=%d type=%d.",
5179 cqe
->event_code
, cqe
->event_type
);
5183 } /* emlxs_sli4_process_async_event() */
5188 emlxs_sli4_process_mbox_event(emlxs_hba_t
*hba
, CQE_MBOX_t
*cqe
)
5190 emlxs_port_t
*port
= &PPORT
;
5193 MATCHMAP
*mbox_nonembed
;
5194 MAILBOXQ
*mbq
= NULL
;
5200 if (cqe
->consumed
&& !cqe
->completed
) {
5201 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5202 "CQ ENTRY: Mbox event. Entry consumed but not completed");
5206 mutex_enter(&EMLXS_PORT_LOCK
);
5207 switch (hba
->mbox_queue_flag
) {
5209 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_stray_mbox_intr_msg
,
5210 "CQ ENTRY: Mbox event. No mailbox active.");
5212 mutex_exit(&EMLXS_PORT_LOCK
);
5217 /* Mark mailbox complete, this should wake up any polling */
5218 /* threads. This can happen if interrupts are enabled while */
5219 /* a polled mailbox command is outstanding. If we don't set */
5220 /* MBQ_COMPLETED here, the polling thread may wait until */
5221 /* timeout error occurs */
5223 mutex_enter(&EMLXS_MBOX_LOCK
);
5224 mbq
= (MAILBOXQ
*)hba
->mbox_mbq
;
5226 port
= (emlxs_port_t
*)mbq
->port
;
5227 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
5228 "CQ ENTRY: Mbox event. Completing Polled command.");
5229 mbq
->flag
|= MBQ_COMPLETED
;
5231 mutex_exit(&EMLXS_MBOX_LOCK
);
5233 mutex_exit(&EMLXS_PORT_LOCK
);
5238 /* Check mbox_timer, it acts as a service flag too */
5239 /* The first to service the mbox queue will clear the timer */
5240 if (hba
->mbox_timer
) {
5241 hba
->mbox_timer
= 0;
5243 mutex_enter(&EMLXS_MBOX_LOCK
);
5244 mbq
= (MAILBOXQ
*)hba
->mbox_mbq
;
5245 mutex_exit(&EMLXS_MBOX_LOCK
);
5249 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
5250 "Mailbox event. No service required.");
5251 mutex_exit(&EMLXS_PORT_LOCK
);
5255 mb
= (MAILBOX4
*)mbq
;
5256 mutex_exit(&EMLXS_PORT_LOCK
);
5260 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_completion_error_msg
,
5261 "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
5262 hba
->mbox_queue_flag
);
5264 mutex_exit(&EMLXS_PORT_LOCK
);
5268 /* Set port context */
5269 port
= (emlxs_port_t
*)mbq
->port
;
5271 offset
= (off_t
)((uint64_t)((unsigned long)
5272 hba
->sli
.sli4
.mq
.addr
.virt
) -
5273 (uint64_t)((unsigned long)
5274 hba
->sli
.sli4
.slim2
.virt
));
5276 /* Now that we are the owner, DMA Sync entire MQ if needed */
5277 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.mq
.addr
.dma_handle
, offset
,
5278 4096, DDI_DMA_SYNC_FORDEV
);
5280 BE_SWAP32_BCOPY((uint8_t *)hba
->mbox_mqe
, (uint8_t *)mb
,
5281 MAILBOX_CMD_SLI4_BSIZE
);
5283 if (mb
->mbxCommand
!= MBX_HEARTBEAT
) {
5284 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5285 "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
5286 mb
->mbxStatus
, mb
->mbxCommand
);
5288 emlxs_data_dump(port
, "MBOX CMP", (uint32_t *)hba
->mbox_mqe
,
5292 if (mb
->mbxCommand
== MBX_SLI_CONFIG
) {
5293 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5294 "Mbox sge_cnt: %d length: %d embed: %d",
5295 mb
->un
.varSLIConfig
.be
.sge_cnt
,
5296 mb
->un
.varSLIConfig
.be
.payload_length
,
5297 mb
->un
.varSLIConfig
.be
.embedded
);
5300 /* Now sync the memory buffer if one was used */
5302 mbox_bp
= (MATCHMAP
*)mbq
->bp
;
5303 EMLXS_MPDATA_SYNC(mbox_bp
->dma_handle
, 0, mbox_bp
->size
,
5304 DDI_DMA_SYNC_FORKERNEL
);
5306 if (emlxs_fm_check_dma_handle(hba
, mbox_bp
->dma_handle
)
5308 EMLXS_MSGF(EMLXS_CONTEXT
,
5309 &emlxs_invalid_dma_handle_msg
,
5310 "sli4_process_mbox_event: hdl=%p",
5311 mbox_bp
->dma_handle
);
5313 mb
->mbxStatus
= MBXERR_DMA_ERROR
;
5318 /* Now sync the memory buffer if one was used */
5319 if (mbq
->nonembed
) {
5320 mbox_nonembed
= (MATCHMAP
*)mbq
->nonembed
;
5321 size
= mbox_nonembed
->size
;
5322 EMLXS_MPDATA_SYNC(mbox_nonembed
->dma_handle
, 0, size
,
5323 DDI_DMA_SYNC_FORKERNEL
);
5324 iptr
= (uint32_t *)((uint8_t *)mbox_nonembed
->virt
);
5325 BE_SWAP32_BCOPY((uint8_t *)iptr
, (uint8_t *)iptr
, size
);
5328 if (emlxs_fm_check_dma_handle(hba
,
5329 mbox_nonembed
->dma_handle
) != DDI_FM_OK
) {
5330 EMLXS_MSGF(EMLXS_CONTEXT
,
5331 &emlxs_invalid_dma_handle_msg
,
5332 "sli4_process_mbox_event: hdl=%p",
5333 mbox_nonembed
->dma_handle
);
5335 mb
->mbxStatus
= MBXERR_DMA_ERROR
;
5338 emlxs_data_dump(port
, "EXT AREA", (uint32_t *)iptr
, 24, 0);
5341 /* Mailbox has been completely received at this point */
5343 if (mb
->mbxCommand
== MBX_HEARTBEAT
) {
5344 hba
->heartbeat_active
= 0;
5348 if (hba
->mbox_queue_flag
== MBX_SLEEP
) {
5349 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
5350 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5351 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
5352 "Received. %s: status=%x Sleep.",
5353 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
5357 if (mb
->mbxCommand
!= MBX_DOWN_LOAD
5358 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5359 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
5360 "Completed. %s: status=%x",
5361 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
5366 /* Filter out passthru mailbox */
5367 if (mbq
->flag
& MBQ_PASSTHRU
) {
5371 if (mb
->mbxStatus
) {
5372 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_detail_msg
,
5373 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb
->mbxCommand
),
5374 (uint32_t)mb
->mbxStatus
);
5377 if (mbq
->mbox_cmpl
) {
5378 rc
= (mbq
->mbox_cmpl
)(hba
, mbq
);
5380 /* If mbox was retried, return immediately */
5388 /* Clean up the mailbox area */
5389 emlxs_mb_fini(hba
, (MAILBOX
*)mb
, mb
->mbxStatus
);
5391 /* Attempt to send pending mailboxes */
5392 mbq
= (MAILBOXQ
*)emlxs_mb_get(hba
);
5394 /* Attempt to send pending mailboxes */
5395 rc
= emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_NOWAIT
, 0);
5396 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
5397 emlxs_mem_put(hba
, MEM_MBOX
, (void *)mbq
);
5402 } /* emlxs_sli4_process_mbox_event() */
5407 emlxs_CQE_to_IOCB(emlxs_hba_t
*hba
, CQE_CmplWQ_t
*cqe
, emlxs_buf_t
*sbp
)
5409 #ifdef DEBUG_FASTPATH
5410 emlxs_port_t
*port
= &PPORT
;
5411 #endif /* DEBUG_FASTPATH */
5418 iocbq
= &sbp
->iocbq
;
5420 iocb
= &iocbq
->iocb
;
5422 #ifdef DEBUG_FASTPATH
5423 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5424 "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe
->Command
,
5425 wqe
->RequestTag
, wqe
->XRITag
);
5426 #endif /* DEBUG_FASTPATH */
5428 iocb
->ULPSTATUS
= cqe
->Status
;
5429 iocb
->un
.ulpWord
[4] = cqe
->Parameter
;
5430 iocb
->ULPIOTAG
= cqe
->RequestTag
;
5431 iocb
->ULPCONTEXT
= wqe
->XRITag
;
5433 switch (wqe
->Command
) {
5435 case CMD_FCP_ICMND64_CR
:
5436 iocb
->ULPCOMMAND
= CMD_FCP_ICMND64_CX
;
5439 case CMD_FCP_IREAD64_CR
:
5440 iocb
->ULPCOMMAND
= CMD_FCP_IREAD64_CX
;
5441 iocb
->ULPPU
= PARM_XFER_CHECK
;
5442 if (iocb
->ULPSTATUS
== IOSTAT_FCP_RSP_ERROR
) {
5443 iocb
->un
.fcpi64
.fcpi_parm
=
5444 wqe
->un
.FcpCmd
.TotalTransferCount
-
5449 case CMD_FCP_IWRITE64_CR
:
5450 iocb
->ULPCOMMAND
= CMD_FCP_IWRITE64_CX
;
5451 if (iocb
->ULPSTATUS
== IOSTAT_FCP_RSP_ERROR
) {
5452 if (wqe
->un
.FcpCmd
.TotalTransferCount
>
5454 iocb
->un
.fcpi64
.fcpi_parm
=
5455 wqe
->un
.FcpCmd
.TotalTransferCount
-
5458 iocb
->un
.fcpi64
.fcpi_parm
= 0;
5463 case CMD_ELS_REQUEST64_CR
:
5464 iocb
->ULPCOMMAND
= CMD_ELS_REQUEST64_CX
;
5465 iocb
->un
.elsreq64
.bdl
.bdeSize
= cqe
->CmdSpecific
;
5466 if (iocb
->ULPSTATUS
== 0) {
5467 iocb
->unsli3
.ext_iocb
.rsplen
= cqe
->CmdSpecific
;
5469 if (iocb
->ULPSTATUS
== IOSTAT_LS_RJT
) {
5470 /* For LS_RJT, the driver populates the rsp buffer */
5471 pkt
= PRIV2PKT(sbp
);
5472 iptr
= (uint32_t *)pkt
->pkt_resp
;
5473 *iptr
++ = ELS_CMD_LS_RJT
;
5474 *iptr
= cqe
->Parameter
;
5478 case CMD_GEN_REQUEST64_CR
:
5479 iocb
->ULPCOMMAND
= CMD_GEN_REQUEST64_CX
;
5480 iocb
->unsli3
.ext_iocb
.rsplen
= cqe
->CmdSpecific
;
5483 case CMD_XMIT_SEQUENCE64_CR
:
5484 iocb
->ULPCOMMAND
= CMD_XMIT_SEQUENCE64_CX
;
5487 case CMD_ABORT_XRI_CX
:
5488 iocb
->ULPCONTEXT
= wqe
->AbortTag
;
5491 case CMD_FCP_TRECEIVE64_CX
:
5492 /* free memory for XRDY */
5494 emlxs_mem_buf_free(hba
, iocbq
->bp
);
5500 case CMD_FCP_TSEND64_CX
:
5501 case CMD_FCP_TRSP64_CX
:
5503 iocb
->ULPCOMMAND
= wqe
->Command
;
5506 } /* emlxs_CQE_to_IOCB() */
5511 emlxs_sli4_hba_flush_chipq(emlxs_hba_t
*hba
)
5513 emlxs_port_t
*port
= &PPORT
;
5518 uint32_t trigger
= 0;
5521 mutex_enter(&EMLXS_FCTAB_LOCK
);
5522 for (i
= 0; i
< hba
->max_iotag
; i
++) {
5523 sbp
= hba
->fc_table
[i
];
5524 if (sbp
== NULL
|| sbp
== STALE_PACKET
) {
5527 hba
->fc_table
[i
] = STALE_PACKET
;
5530 mutex_exit(&EMLXS_FCTAB_LOCK
);
5533 bzero(&cqe
, sizeof (CQE_CmplWQ_t
));
5535 cqe
.Status
= IOSTAT_LOCAL_REJECT
;
5536 cqe
.Parameter
= IOERR_SEQUENCE_TIMEOUT
;
5538 cp
->hbaCmplCmd_sbp
++;
5543 emlxs_fct_io_trace(port
, sbp
->fct_cmd
,
5544 EMLXS_FCT_IOCB_COMPLETE
);
5546 #endif /* FCT_IO_TRACE */
5547 #endif /* SFCT_SUPPORT */
5549 if (sbp
->pkt_flags
& PACKET_IN_CHIPQ
) {
5550 atomic_dec_32(&hba
->io_active
);
5551 #ifdef NODE_THROTTLE_SUPPORT
5553 atomic_dec_32(&sbp
->node
->io_active
);
5555 #endif /* NODE_THROTTLE_SUPPORT */
5558 /* Copy entry to sbp's iocbq */
5559 iocbq
= &sbp
->iocbq
;
5560 emlxs_CQE_to_IOCB(hba
, &cqe
, sbp
);
5564 /* Exchange is no longer busy on-chip, free it */
5565 emlxs_sli4_free_xri(port
, sbp
, sbp
->xrip
, 1);
5567 if (!(sbp
->pkt_flags
&
5568 (PACKET_POLLED
| PACKET_ALLOCATED
))) {
5569 /* Add the IOCB to the channel list */
5570 mutex_enter(&cp
->rsp_lock
);
5571 if (cp
->rsp_head
== NULL
) {
5572 cp
->rsp_head
= iocbq
;
5573 cp
->rsp_tail
= iocbq
;
5575 cp
->rsp_tail
->next
= iocbq
;
5576 cp
->rsp_tail
= iocbq
;
5578 mutex_exit(&cp
->rsp_lock
);
5581 emlxs_proc_channel_event(hba
, cp
, iocbq
);
5583 mutex_enter(&EMLXS_FCTAB_LOCK
);
5585 mutex_exit(&EMLXS_FCTAB_LOCK
);
5588 for (i
= 0; i
< hba
->chan_count
; i
++) {
5590 if (cp
->rsp_head
!= NULL
) {
5591 emlxs_thread_trigger2(&cp
->intr_thread
,
5592 emlxs_proc_channel
, cp
);
5597 } /* emlxs_sli4_hba_flush_chipq() */
5602 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t
*hba
,
5603 CQ_DESC_t
*cq
, CQE_CmplWQ_t
*cqe
)
5605 emlxs_port_t
*port
= &PPORT
;
5607 uint16_t request_tag
;
5609 request_tag
= cqe
->RequestTag
;
5611 /* 1 to 1 mapping between CQ and channel */
5616 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5617 "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag
);
5619 emlxs_data_dump(port
, "CQE", (uint32_t *)cqe
, 4, 0);
5621 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5626 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t
*hba
, CQ_DESC_t
*cq
, CQE_CmplWQ_t
*cqe
)
5628 emlxs_port_t
*port
= &PPORT
;
5632 uint16_t request_tag
;
5636 emlxs_buf_t
*cmd_sbp
;
5637 #endif /* FCT_IO_TRACE */
5638 #endif /* SFCT_SUPPORT */
5640 request_tag
= cqe
->RequestTag
;
5642 /* 1 to 1 mapping between CQ and channel */
5645 mutex_enter(&EMLXS_FCTAB_LOCK
);
5646 sbp
= hba
->fc_table
[request_tag
];
5650 mutex_exit(&EMLXS_FCTAB_LOCK
);
5651 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5652 "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
5657 if (sbp
== STALE_PACKET
) {
5658 cp
->hbaCmplCmd_sbp
++;
5659 mutex_exit(&EMLXS_FCTAB_LOCK
);
5660 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5661 "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag
);
5665 if (sbp
->pkt_flags
& PACKET_IN_CHIPQ
) {
5666 atomic_add_32(&hba
->io_active
, -1);
5667 #ifdef NODE_THROTTLE_SUPPORT
5669 atomic_add_32(&sbp
->node
->io_active
, -1);
5671 #endif /* NODE_THROTTLE_SUPPORT */
5676 mutex_exit(&EMLXS_FCTAB_LOCK
);
5677 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5678 "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
5683 #ifdef DEBUG_FASTPATH
5684 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5685 "CQ ENTRY: process wqe compl");
5686 #endif /* DEBUG_FASTPATH */
5687 cp
->hbaCmplCmd_sbp
++;
5689 /* Copy entry to sbp's iocbq */
5690 iocbq
= &sbp
->iocbq
;
5691 emlxs_CQE_to_IOCB(hba
, cqe
, sbp
);
5696 /* Mark exchange as ABORT in progress */
5697 sbp
->xrip
->flag
&= ~EMLXS_XRI_PENDING_IO
;
5698 sbp
->xrip
->flag
|= EMLXS_XRI_BUSY
;
5700 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5701 "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag
,
5704 emlxs_sli4_free_xri(port
, sbp
, 0, 0);
5706 /* Exchange is no longer busy on-chip, free it */
5707 emlxs_sli4_free_xri(port
, sbp
, sbp
->xrip
, 0);
5710 mutex_exit(&EMLXS_FCTAB_LOCK
);
5714 fct_cmd
= sbp
->fct_cmd
;
5716 cmd_sbp
= (emlxs_buf_t
*)fct_cmd
->cmd_fca_private
;
5717 mutex_enter(&cmd_sbp
->fct_mtx
);
5718 EMLXS_FCT_STATE_CHG(fct_cmd
, cmd_sbp
, EMLXS_FCT_IOCB_COMPLETE
);
5719 mutex_exit(&cmd_sbp
->fct_mtx
);
5721 #endif /* FCT_IO_TRACE */
5722 #endif /* SFCT_SUPPORT */
5725 * If this is NOT a polled command completion
5726 * or a driver allocated pkt, then defer pkt
5729 if (!(sbp
->pkt_flags
&
5730 (PACKET_POLLED
| PACKET_ALLOCATED
))) {
5731 /* Add the IOCB to the channel list */
5732 mutex_enter(&cp
->rsp_lock
);
5733 if (cp
->rsp_head
== NULL
) {
5734 cp
->rsp_head
= iocbq
;
5735 cp
->rsp_tail
= iocbq
;
5737 cp
->rsp_tail
->next
= iocbq
;
5738 cp
->rsp_tail
= iocbq
;
5740 mutex_exit(&cp
->rsp_lock
);
5742 /* Delay triggering thread till end of ISR */
5743 cp
->chan_flag
|= EMLXS_NEEDS_TRIGGER
;
5745 emlxs_proc_channel_event(hba
, cp
, iocbq
);
5748 } /* emlxs_sli4_process_wqe_cmpl() */
5753 emlxs_sli4_process_release_wqe(emlxs_hba_t
*hba
, CQ_DESC_t
*cq
,
5756 emlxs_port_t
*port
= &PPORT
;
5762 wqi
= emlxs_sli4_wqid_to_index(hba
, (uint16_t)cqe
->WQid
);
5764 /* Verify WQ index */
5765 if (wqi
== 0xffff) {
5766 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5767 "CQ ENTRY: Invalid WQid:%d. Dropping...",
5772 wq
= &hba
->sli
.sli4
.wq
[wqi
];
5774 #ifdef DEBUG_FASTPATH
5775 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5776 "CQ ENTRY: process release wqe: old %d new %d", wq
->port_index
,
5778 #endif /* DEBUG_FASTPATH */
5780 wq
->port_index
= cqe
->WQindex
;
5782 /* Cmd ring may be available. Try sending more iocbs */
5783 for (i
= 0; i
< hba
->chan_count
; i
++) {
5785 if (wq
== (WQ_DESC_t
*)cp
->iopath
) {
5786 emlxs_sli4_issue_iocb_cmd(hba
, cp
, 0);
5790 } /* emlxs_sli4_process_release_wqe() */
5795 emlxs_sli4_rxq_get(emlxs_hba_t
*hba
, fc_frame_hdr_t
*fchdr
)
5798 emlxs_iocbq_t
*iocbq
;
5799 emlxs_iocbq_t
*prev
;
5800 fc_frame_hdr_t
*fchdr2
;
5803 switch (fchdr
->type
) {
5805 rxq
= &hba
->sli
.sli4
.rxq
[EMLXS_RXQ_ELS
];
5808 rxq
= &hba
->sli
.sli4
.rxq
[EMLXS_RXQ_CT
];
5814 mutex_enter(&rxq
->lock
);
5817 iocbq
= (emlxs_iocbq_t
*)q
->q_first
;
5822 fchdr2
= (fc_frame_hdr_t
*)iocbq
->iocb
.un
.ulpWord
;
5824 if ((fchdr2
->s_id
== fchdr
->s_id
) &&
5825 (fchdr2
->ox_id
== fchdr
->ox_id
) &&
5826 (fchdr2
->seq_id
== fchdr
->seq_id
)) {
5829 prev
->next
= iocbq
->next
;
5831 if (q
->q_first
== (uint8_t *)iocbq
) {
5832 q
->q_first
= (uint8_t *)iocbq
->next
;
5834 if (q
->q_last
== (uint8_t *)iocbq
) {
5835 q
->q_last
= (uint8_t *)prev
;
5843 iocbq
= iocbq
->next
;
5846 mutex_exit(&rxq
->lock
);
5850 } /* emlxs_sli4_rxq_get() */
5855 emlxs_sli4_rxq_put(emlxs_hba_t
*hba
, emlxs_iocbq_t
*iocbq
)
5858 fc_frame_hdr_t
*fchdr
;
5861 fchdr
= (fc_frame_hdr_t
*)iocbq
->iocb
.RXFCHDR
;
5863 switch (fchdr
->type
) {
5865 rxq
= &hba
->sli
.sli4
.rxq
[EMLXS_RXQ_ELS
];
5868 rxq
= &hba
->sli
.sli4
.rxq
[EMLXS_RXQ_CT
];
5874 mutex_enter(&rxq
->lock
);
5879 ((emlxs_iocbq_t
*)q
->q_last
)->next
= iocbq
;
5882 q
->q_first
= (uint8_t *)iocbq
;
5886 q
->q_last
= (uint8_t *)iocbq
;
5889 mutex_exit(&rxq
->lock
);
5893 } /* emlxs_sli4_rxq_put() */
5897 emlxs_sli4_rq_post(emlxs_port_t
*port
, uint16_t rqid
)
5899 emlxs_hba_t
*hba
= HBA
;
5902 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5903 "RQ POST: rqid=%d count=1", rqid
);
5905 /* Ring the RQ doorbell once to repost the RQ buffer */
5908 rqdb
.db
.NumPosted
= 1;
5910 emlxs_sli4_write_rqdb(hba
, rqdb
.word
);
5912 } /* emlxs_sli4_rq_post() */
5917 emlxs_sli4_process_unsol_rcv(emlxs_hba_t
*hba
, CQ_DESC_t
*cq
,
5918 CQE_UnsolRcv_t
*cqe
)
5920 emlxs_port_t
*port
= &PPORT
;
5921 emlxs_port_t
*vport
;
5928 fc_frame_hdr_t fchdr
;
5930 uint32_t host_index
;
5931 emlxs_iocbq_t
*iocbq
= NULL
;
5933 emlxs_node_t
*node
= NULL
;
5942 RPIobj_t
*rpip
= NULL
;
5944 uint32_t posted
= 0;
5954 if (cqe
->Code
== CQE_TYPE_UNSOL_RCV_V1
) {
5955 CQE_UnsolRcvV1_t
*cqeV1
= (CQE_UnsolRcvV1_t
*)cqe
;
5957 status
= cqeV1
->Status
;
5958 data_size
= cqeV1
->data_size
;
5960 hdr_size
= cqeV1
->hdr_size
;
5962 status
= cqe
->Status
;
5963 data_size
= cqe
->data_size
;
5965 hdr_size
= cqe
->hdr_size
;
5968 /* Validate the CQE */
5972 case RQ_STATUS_SUCCESS
: /* 0x10 */
5975 case RQ_STATUS_BUFLEN_EXCEEDED
: /* 0x11 */
5976 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5977 "CQ ENTRY: Unsol Rcv: Payload truncated.");
5980 case RQ_STATUS_NEED_BUFFER
: /* 0x12 */
5981 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5982 "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
5985 case RQ_STATUS_FRAME_DISCARDED
: /* 0x13 */
5986 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
5987 "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
5991 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
5992 "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
5997 /* Make sure there is a frame header */
5998 if (hdr_size
< sizeof (fc_frame_hdr_t
)) {
5999 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6000 "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6004 hdr_rqi
= emlxs_sli4_rqid_to_index(hba
, rqid
);
6006 /* Verify RQ index */
6007 if (hdr_rqi
== 0xffff) {
6008 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6009 "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6014 hdr_rq
= &hba
->sli
.sli4
.rq
[hdr_rqi
];
6015 data_rq
= &hba
->sli
.sli4
.rq
[hdr_rqi
+ 1];
6017 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6018 "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6019 "hdr_size=%d data_size=%d",
6020 cqe
->Code
, rqid
, hdr_rqi
, hdr_rq
->host_index
, status
, hdr_size
,
6025 /* Update host index */
6026 mutex_enter(&hba
->sli
.sli4
.rq
[hdr_rqi
].lock
);
6027 host_index
= hdr_rq
->host_index
;
6028 hdr_rq
->host_index
++;
6030 if (hdr_rq
->host_index
>= hdr_rq
->max_index
) {
6031 hdr_rq
->host_index
= 0;
6033 data_rq
->host_index
= hdr_rq
->host_index
;
6034 mutex_exit(&hba
->sli
.sli4
.rq
[hdr_rqi
].lock
);
6036 /* Get the next header rqb */
6037 hdr_mp
= &hdr_rq
->rqb
[host_index
];
6039 offset
= (off_t
)((uint64_t)((unsigned long)hdr_mp
->virt
) -
6040 (uint64_t)((unsigned long)hba
->sli
.sli4
.slim2
.virt
));
6042 EMLXS_MPDATA_SYNC(hdr_mp
->dma_handle
, offset
,
6043 sizeof (fc_frame_hdr_t
), DDI_DMA_SYNC_FORKERNEL
);
6045 LE_SWAP32_BCOPY(hdr_mp
->virt
, (uint8_t *)&fchdr
,
6046 sizeof (fc_frame_hdr_t
));
6048 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6049 "RQ HDR[%d]: rctl:%x type:%x "
6050 "sid:%x did:%x oxid:%x rxid:%x",
6051 host_index
, fchdr
.r_ctl
, fchdr
.type
,
6052 fchdr
.s_id
, fchdr
.d_id
, fchdr
.ox_id
, fchdr
.rx_id
);
6054 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6055 "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6056 host_index
, fchdr
.f_ctl
, fchdr
.seq_id
, fchdr
.seq_cnt
,
6057 fchdr
.df_ctl
, fchdr
.ro
);
6059 /* Verify fc header type */
6060 switch (fchdr
.type
) {
6062 if (fchdr
.r_ctl
!= 0x81) {
6063 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6064 "RQ ENTRY: Unexpected FC rctl (0x%x) "
6065 "received. Dropping...",
6071 /* Make sure there is no payload */
6072 if (data_size
!= 0) {
6073 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6074 "RQ ENTRY: ABTS payload provided. Dropping...");
6079 buf_type
= 0xFFFFFFFF;
6080 (void) strlcpy(label
, "ABTS", sizeof (label
));
6081 cp
= &hba
->chan
[hba
->channel_els
];
6084 case 0x01: /* ELS */
6085 /* Make sure there is a payload */
6086 if (data_size
== 0) {
6087 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6088 "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6094 buf_type
= MEM_ELSBUF
;
6095 (void) strlcpy(label
, "Unsol ELS", sizeof (label
));
6096 cp
= &hba
->chan
[hba
->channel_els
];
6100 /* Make sure there is a payload */
6101 if (data_size
== 0) {
6102 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6103 "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6109 buf_type
= MEM_CTBUF
;
6110 (void) strlcpy(label
, "Unsol CT", sizeof (label
));
6111 cp
= &hba
->chan
[hba
->channel_ct
];
6114 case 0x08: /* FCT */
6115 /* Make sure there is a payload */
6116 if (data_size
== 0) {
6117 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6118 "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6124 buf_type
= MEM_FCTBUF
;
6125 (void) strlcpy(label
, "Unsol FCT", sizeof (label
));
6126 cp
= &hba
->chan
[hba
->CHANNEL_FCT
];
6130 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6131 "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6136 /* Fc Header is valid */
6138 /* Check if this is an active sequence */
6139 iocbq
= emlxs_sli4_rxq_get(hba
, &fchdr
);
6142 if (fchdr
.type
!= 0) {
6143 if (!(fchdr
.f_ctl
& F_CTL_FIRST_SEQ
)) {
6144 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6145 "RQ ENTRY: %s: First of sequence not"
6146 " set. Dropping...",
6153 if ((fchdr
.type
!= 0) && (fchdr
.seq_cnt
!= 0)) {
6154 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6155 "RQ ENTRY: %s: Sequence count not zero (%d). "
6157 label
, fchdr
.seq_cnt
);
6163 for (i
= 0; i
< MAX_VPORTS
; i
++) {
6166 if (vport
->did
== fchdr
.d_id
) {
6172 if (i
== MAX_VPORTS
) {
6173 /* Allow unsol FLOGI & PLOGI for P2P */
6174 if ((fchdr
.type
!= 1 /* ELS*/) ||
6175 ((fchdr
.d_id
!= FABRIC_DID
) &&
6176 !(hba
->flag
& FC_PT_TO_PT
))) {
6177 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6178 "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6185 /* Allocate an IOCBQ */
6186 iocbq
= (emlxs_iocbq_t
*)emlxs_mem_get(hba
, MEM_IOCB
);
6189 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6190 "RQ ENTRY: %s: Out of IOCB "
6191 "resources. Dropping...",
6198 if (fchdr
.type
!= 0) {
6199 /* Allocate a buffer */
6200 seq_mp
= (MATCHMAP
*)emlxs_mem_get(hba
, buf_type
);
6203 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6204 "RQ ENTRY: %s: Out of buffer "
6205 "resources. Dropping...",
6211 iocbq
->bp
= (uint8_t *)seq_mp
;
6214 node
= (void *)emlxs_node_find_did(port
, fchdr
.s_id
, 1);
6216 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6217 "RQ ENTRY: %s: Node not found. sid=%x",
6221 /* Initialize the iocbq */
6223 iocbq
->channel
= cp
;
6226 iocb
= &iocbq
->iocb
;
6235 iocb
= &iocbq
->iocb
;
6237 node
= (emlxs_node_t
*)iocbq
->node
;
6239 seq_mp
= (MATCHMAP
*)iocbq
->bp
;
6240 seq_len
= iocb
->RXSEQLEN
;
6241 seq_cnt
= iocb
->RXSEQCNT
;
6243 /* Check sequence order */
6244 if (fchdr
.seq_cnt
!= seq_cnt
) {
6245 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6246 "RQ ENTRY: %s: Out of order frame received "
6247 "(%d != %d). Dropping...",
6248 label
, fchdr
.seq_cnt
, seq_cnt
);
6254 /* We now have an iocbq */
6256 if (!port
->vpip
->vfip
) {
6257 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6258 "RQ ENTRY: %s: No fabric connection. "
6265 /* Save the frame data to our seq buffer */
6266 if (data_size
&& seq_mp
) {
6267 /* Get the next data rqb */
6268 data_mp
= &data_rq
->rqb
[host_index
];
6270 offset
= (off_t
)((uint64_t)((unsigned long)
6272 (uint64_t)((unsigned long)
6273 hba
->sli
.sli4
.slim2
.virt
));
6275 EMLXS_MPDATA_SYNC(data_mp
->dma_handle
, offset
,
6276 data_size
, DDI_DMA_SYNC_FORKERNEL
);
6278 data
= (uint32_t *)data_mp
->virt
;
6280 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6281 "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6282 host_index
, data
[0], data
[1], data
[2], data
[3],
6285 /* Check sequence length */
6286 if ((seq_len
+ data_size
) > seq_mp
->size
) {
6287 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6288 "RQ ENTRY: %s: Sequence buffer overflow. "
6289 "(%d > %d). Dropping...",
6290 label
, (seq_len
+ data_size
), seq_mp
->size
);
6295 /* Copy data to local receive buffer */
6296 bcopy((uint8_t *)data
, ((uint8_t *)seq_mp
->virt
+
6297 seq_len
), data_size
);
6299 seq_len
+= data_size
;
6302 /* If this is not the last frame of sequence, queue it. */
6303 if (!(fchdr
.f_ctl
& F_CTL_END_SEQ
)) {
6304 /* Save sequence header */
6306 bcopy((uint8_t *)&fchdr
, (uint8_t *)iocb
->RXFCHDR
,
6307 sizeof (fc_frame_hdr_t
));
6310 /* Update sequence info in iocb */
6311 iocb
->RXSEQCNT
= seq_cnt
+ 1;
6312 iocb
->RXSEQLEN
= seq_len
;
6314 /* Queue iocbq for next frame */
6315 emlxs_sli4_rxq_put(hba
, iocbq
);
6317 /* Don't free resources */
6320 /* No need to abort */
6326 emlxs_sli4_rq_post(port
, hdr_rq
->qid
);
6329 /* End of sequence found. Process request now. */
6332 /* Retrieve first frame of sequence */
6333 bcopy((uint8_t *)iocb
->RXFCHDR
, (uint8_t *)&fchdr
,
6334 sizeof (fc_frame_hdr_t
));
6336 bzero((uint8_t *)iocb
, sizeof (emlxs_iocb_t
));
6339 /* Build rcv iocb and process it */
6340 switch (fchdr
.type
) {
6343 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6344 "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6345 label
, fchdr
.ox_id
, fchdr
.rx_id
, fchdr
.s_id
);
6347 /* Try to send abort response */
6348 if (!(pkt
= emlxs_pkt_alloc(port
, 0, 0, 0, KM_NOSLEEP
))) {
6349 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6350 "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6355 /* Setup sbp / iocb for driver initiated cmd */
6356 sbp
= PKT2PRIV(pkt
);
6358 /* Free the temporary iocbq */
6359 emlxs_mem_put(hba
, MEM_IOCB
, (void *)iocbq
);
6361 iocbq
= (emlxs_iocbq_t
*)&sbp
->iocbq
;
6363 iocbq
->channel
= cp
;
6366 sbp
->pkt_flags
&= ~PACKET_ULP_OWNED
;
6370 sbp
->did
= node
->nlp_DID
;
6373 iocbq
->flag
|= (IOCB_PRIORITY
| IOCB_SPECIAL
);
6375 /* BLS ACC Response */
6377 bzero((void *)wqe
, sizeof (emlxs_wqe_t
));
6379 iocbq
->iocb
.ULPCOMMAND
= CMD_XMIT_BLS_RSP64_CX
;
6380 wqe
->Command
= CMD_XMIT_BLS_RSP64_CX
;
6381 wqe
->CmdType
= WQE_TYPE_GEN
;
6383 wqe
->un
.BlsRsp
.Payload0
= 0x80;
6384 wqe
->un
.BlsRsp
.Payload1
= fchdr
.seq_id
;
6386 wqe
->un
.BlsRsp
.OXId
= fchdr
.ox_id
;
6387 wqe
->un
.BlsRsp
.RXId
= fchdr
.rx_id
;
6389 wqe
->un
.BlsRsp
.SeqCntLow
= 0;
6390 wqe
->un
.BlsRsp
.SeqCntHigh
= 0xFFFF;
6392 wqe
->un
.BlsRsp
.XO
= ((fchdr
.f_ctl
& F_CTL_XCHG_CONTEXT
)? 1:0);
6393 wqe
->un
.BlsRsp
.AR
= 0;
6395 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
6398 wqe
->ContextType
= WQE_RPI_CONTEXT
;
6399 wqe
->ContextTag
= rpip
->RPI
;
6401 wqe
->ContextType
= WQE_VPI_CONTEXT
;
6402 wqe
->ContextTag
= port
->vpip
->VPI
;
6404 rpip
= emlxs_rpi_reserve_notify(port
, fchdr
.s_id
, 0);
6407 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6408 "RQ ENTRY: %s: Unable to alloc "
6409 "reserved RPI. Dropping...",
6415 /* Store the reserved rpi */
6416 wqe
->CmdSpecific
= rpip
->RPI
;
6418 wqe
->un
.BlsRsp
.RemoteId
= fchdr
.s_id
;
6419 wqe
->un
.BlsRsp
.LocalId
= fchdr
.d_id
;
6422 if (fchdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
6424 wqe
->CCP
= fchdr
.rsvd
;
6427 /* Allocate an exchange for this command */
6428 xrip
= emlxs_sli4_alloc_xri(port
, sbp
, rpip
,
6429 EMLXS_XRI_SOL_BLS_TYPE
);
6432 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6433 "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6438 wqe
->XRITag
= xrip
->XRI
;
6439 wqe
->Class
= CLASS3
;
6440 wqe
->RequestTag
= xrip
->iotag
;
6441 wqe
->CQId
= (uint16_t)0xffff; /* default CQ for response */
6443 sbp
->ticks
= hba
->timer_tics
+ 30;
6445 emlxs_sli4_issue_iocb_cmd(hba
, iocbq
->channel
, iocbq
);
6447 /* The temporary iocbq has been freed already */
6453 cmd
= *((uint32_t *)seq_mp
->virt
);
6454 cmd
&= ELS_CMD_MASK
;
6456 if (!(port
->vpip
->flag
& EMLXS_VPI_PORT_ENABLED
)) {
6457 uint32_t dropit
= 1;
6459 /* Allow for P2P handshaking */
6467 if (hba
->flag
& FC_PT_TO_PT
) {
6474 EMLXS_MSGF(EMLXS_CONTEXT
,
6475 &emlxs_sli_detail_msg
,
6476 "RQ ENTRY: %s: Port not yet enabled. "
6485 if (cmd
!= ELS_CMD_LOGO
) {
6486 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
6490 /* Use the fabric rpi */
6491 rpip
= port
->vpip
->fabric_rpip
;
6494 xrip
= emlxs_sli4_reserve_xri(port
, rpip
,
6495 EMLXS_XRI_UNSOL_ELS_TYPE
, fchdr
.ox_id
);
6498 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6499 "RQ ENTRY: %s: Out of exchange "
6500 "resources. Dropping...",
6506 /* Build CMD_RCV_ELS64_CX */
6507 iocb
->un
.rcvels64
.elsReq
.tus
.f
.bdeFlags
= 0;
6508 iocb
->un
.rcvels64
.elsReq
.tus
.f
.bdeSize
= seq_len
;
6509 iocb
->un
.rcvels64
.elsReq
.addrLow
= PADDR_LO(seq_mp
->phys
);
6510 iocb
->un
.rcvels64
.elsReq
.addrHigh
= PADDR_HI(seq_mp
->phys
);
6511 iocb
->ULPBDECOUNT
= 1;
6513 iocb
->un
.rcvels64
.remoteID
= fchdr
.s_id
;
6514 iocb
->un
.rcvels64
.parmRo
= fchdr
.d_id
;
6517 iocb
->ULPCONTEXT
= xrip
->XRI
;
6518 iocb
->ULPIOTAG
= ((node
)? node
->nlp_Rpi
:0);
6519 iocb
->ULPCLASS
= CLASS3
;
6520 iocb
->ULPCOMMAND
= CMD_RCV_ELS64_CX
;
6522 iocb
->unsli3
.ext_rcv
.seq_len
= seq_len
;
6523 iocb
->unsli3
.ext_rcv
.vpi
= port
->vpip
->VPI
;
6524 iocb
->unsli3
.ext_rcv
.oxid
= fchdr
.ox_id
;
6526 if (fchdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
6527 iocb
->unsli3
.ext_rcv
.ccpe
= 1;
6528 iocb
->unsli3
.ext_rcv
.ccp
= fchdr
.rsvd
;
6531 if (port
->mode
== MODE_INITIATOR
) {
6532 (void) emlxs_els_handle_unsol_req(port
, iocbq
->channel
,
6533 iocbq
, seq_mp
, seq_len
);
6536 else if (port
->mode
== MODE_TARGET
) {
6537 (void) emlxs_fct_handle_unsol_els(port
, iocbq
->channel
,
6538 iocbq
, seq_mp
, seq_len
);
6540 #endif /* SFCT_SUPPORT */
6545 if (!(port
->VPIobj
.flag
& EMLXS_VPI_PORT_ENABLED
)) {
6546 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6547 "RQ ENTRY: %s: Port not yet enabled. "
6554 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
6557 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6558 "RQ ENTRY: %s: Port not logged in. "
6565 xrip
= emlxs_sli4_reserve_xri(port
, rpip
,
6566 EMLXS_XRI_UNSOL_FCP_TYPE
, fchdr
.ox_id
);
6569 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6570 "RQ ENTRY: %s: Out of exchange "
6571 "resources. Dropping...",
6577 /* Build CMD_RCV_SEQUENCE64_CX */
6578 iocb
->un
.rcvseq64
.rcvBde
.tus
.f
.bdeFlags
= 0;
6579 iocb
->un
.rcvseq64
.rcvBde
.tus
.f
.bdeSize
= seq_len
;
6580 iocb
->un
.rcvseq64
.rcvBde
.addrLow
= PADDR_LO(seq_mp
->phys
);
6581 iocb
->un
.rcvseq64
.rcvBde
.addrHigh
= PADDR_HI(seq_mp
->phys
);
6582 iocb
->ULPBDECOUNT
= 1;
6585 iocb
->ULPCONTEXT
= xrip
->XRI
;
6586 iocb
->ULPIOTAG
= ((node
)? node
->nlp_Rpi
:0);
6587 iocb
->ULPCLASS
= CLASS3
;
6588 iocb
->ULPCOMMAND
= CMD_RCV_ELS64_CX
;
6590 iocb
->unsli3
.ext_rcv
.seq_len
= seq_len
;
6591 iocb
->unsli3
.ext_rcv
.vpi
= port
->VPIobj
.VPI
;
6592 iocb
->unsli3
.ext_rcv
.oxid
= fchdr
.ox_id
;
6594 if (fchdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
6595 iocb
->unsli3
.ext_rcv
.ccpe
= 1;
6596 iocb
->unsli3
.ext_rcv
.ccp
= fchdr
.rsvd
;
6599 /* pass xrip to FCT in the iocbq */
6602 #define EMLXS_FIX_CISCO_BUG1
6603 #ifdef EMLXS_FIX_CISCO_BUG1
6606 ptr
= ((uint8_t *)seq_mp
->virt
);
6607 if (((*ptr
+12) != 0xa0) && (*(ptr
+20) == 0x8) && (*(ptr
+21) == 0x8)) {
6608 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6609 "RQ ENTRY: Bad CDB fixed");
6615 (void) emlxs_fct_handle_unsol_req(port
, cp
, iocbq
,
6618 #endif /* SFCT_SUPPORT */
6621 if (!(port
->vpip
->flag
& EMLXS_VPI_PORT_ENABLED
) &&
6622 !(hba
->flag
& FC_LOOPBACK_MODE
)) {
6623 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6624 "RQ ENTRY: %s: Port not yet enabled. "
6632 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6633 "RQ ENTRY: %s: Node not found (did=%x). "
6640 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
6643 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6644 "RQ ENTRY: %s: RPI not found (did=%x rpi=%d). "
6646 label
, fchdr
.d_id
, node
->nlp_Rpi
);
6651 xrip
= emlxs_sli4_reserve_xri(port
, rpip
,
6652 EMLXS_XRI_UNSOL_CT_TYPE
, fchdr
.ox_id
);
6655 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6656 "RQ ENTRY: %s: Out of exchange "
6657 "resources. Dropping...",
6663 /* Build CMD_RCV_SEQ64_CX */
6664 iocb
->un
.rcvseq64
.rcvBde
.tus
.f
.bdeFlags
= 0;
6665 iocb
->un
.rcvseq64
.rcvBde
.tus
.f
.bdeSize
= seq_len
;
6666 iocb
->un
.rcvseq64
.rcvBde
.addrLow
= PADDR_LO(seq_mp
->phys
);
6667 iocb
->un
.rcvseq64
.rcvBde
.addrHigh
= PADDR_HI(seq_mp
->phys
);
6668 iocb
->ULPBDECOUNT
= 1;
6670 iocb
->un
.rcvseq64
.xrsqRo
= 0;
6671 iocb
->un
.rcvseq64
.w5
.hcsw
.Rctl
= fchdr
.r_ctl
;
6672 iocb
->un
.rcvseq64
.w5
.hcsw
.Type
= fchdr
.type
;
6673 iocb
->un
.rcvseq64
.w5
.hcsw
.Dfctl
= fchdr
.df_ctl
;
6674 iocb
->un
.rcvseq64
.w5
.hcsw
.Fctl
= fchdr
.f_ctl
;
6677 iocb
->ULPCONTEXT
= xrip
->XRI
;
6678 iocb
->ULPIOTAG
= rpip
->RPI
;
6679 iocb
->ULPCLASS
= CLASS3
;
6680 iocb
->ULPCOMMAND
= CMD_RCV_SEQ64_CX
;
6682 iocb
->unsli3
.ext_rcv
.seq_len
= seq_len
;
6683 iocb
->unsli3
.ext_rcv
.vpi
= port
->vpip
->VPI
;
6685 if (fchdr
.f_ctl
& F_CTL_CHAINED_SEQ
) {
6686 iocb
->unsli3
.ext_rcv
.ccpe
= 1;
6687 iocb
->unsli3
.ext_rcv
.ccp
= fchdr
.rsvd
;
6690 (void) emlxs_ct_handle_unsol_req(port
, iocbq
->channel
,
6691 iocbq
, seq_mp
, seq_len
);
6696 /* Sequence handled, no need to abort */
6702 emlxs_sli4_rq_post(port
, hdr_rq
->qid
);
6706 /* Send ABTS for this exchange */
6707 /* !!! Currently, we have no implementation for this !!! */
6711 /* Return memory resources to pools */
6714 emlxs_mem_put(hba
, buf_type
, (void *)iocbq
->bp
);
6718 emlxs_mem_put(hba
, MEM_IOCB
, (void *)iocbq
);
6722 if (emlxs_fm_check_dma_handle(hba
,
6723 hba
->sli
.sli4
.slim2
.dma_handle
)
6725 EMLXS_MSGF(EMLXS_CONTEXT
,
6726 &emlxs_invalid_dma_handle_msg
,
6727 "sli4_process_unsol_rcv: hdl=%p",
6728 hba
->sli
.sli4
.slim2
.dma_handle
);
6730 emlxs_thread_spawn(hba
, emlxs_restart_thread
,
6736 } /* emlxs_sli4_process_unsol_rcv() */
6741 emlxs_sli4_process_xri_aborted(emlxs_hba_t
*hba
, CQ_DESC_t
*cq
,
6742 CQE_XRI_Abort_t
*cqe
)
6744 emlxs_port_t
*port
= &PPORT
;
6747 mutex_enter(&EMLXS_FCTAB_LOCK
);
6749 xrip
= emlxs_sli4_find_xri(port
, cqe
->XRI
);
6751 /* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6752 /* "CQ ENTRY: process xri aborted ignored"); */
6754 mutex_exit(&EMLXS_FCTAB_LOCK
);
6758 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6759 "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6760 cqe
->XRI
, cqe
->IA
, cqe
->EO
, cqe
->BR
);
6762 if (!(xrip
->flag
& EMLXS_XRI_BUSY
)) {
6763 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6764 "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6765 xrip
->XRI
, xrip
->flag
);
6767 mutex_exit(&EMLXS_FCTAB_LOCK
);
6771 /* Exchange is no longer busy on-chip, free it */
6772 emlxs_sli4_free_xri(port
, 0, xrip
, 0);
6774 mutex_exit(&EMLXS_FCTAB_LOCK
);
6778 } /* emlxs_sli4_process_xri_aborted () */
6783 emlxs_sli4_process_cq(emlxs_hba_t
*hba
, CQ_DESC_t
*cq
)
6785 emlxs_port_t
*port
= &PPORT
;
6789 int num_entries
= 0;
6792 /* EMLXS_PORT_LOCK must be held when entering this routine */
6794 cqe
= (CQE_u
*)cq
->addr
.virt
;
6795 cqe
+= cq
->host_index
;
6797 offset
= (off_t
)((uint64_t)((unsigned long)
6799 (uint64_t)((unsigned long)
6800 hba
->sli
.sli4
.slim2
.virt
));
6802 EMLXS_MPDATA_SYNC(cq
->addr
.dma_handle
, offset
,
6803 4096, DDI_DMA_SYNC_FORKERNEL
);
6806 cq_entry
.word
[3] = BE_SWAP32(cqe
->word
[3]);
6807 if (!(cq_entry
.word
[3] & CQE_VALID
)) {
6811 cq_entry
.word
[2] = BE_SWAP32(cqe
->word
[2]);
6812 cq_entry
.word
[1] = BE_SWAP32(cqe
->word
[1]);
6813 cq_entry
.word
[0] = BE_SWAP32(cqe
->word
[0]);
6816 emlxs_data_dump(port
, "CQE", (uint32_t *)cqe
, 6, 0);
6817 #endif /* DEBUG_CQE */
6822 if (cq
->host_index
>= cq
->max_index
) {
6824 cqe
= (CQE_u
*)cq
->addr
.virt
;
6828 mutex_exit(&EMLXS_PORT_LOCK
);
6830 /* Now handle specific cq type */
6831 if (cq
->type
== EMLXS_CQ_TYPE_GROUP1
) {
6832 if (cq_entry
.cqAsyncEntry
.async_evt
) {
6833 emlxs_sli4_process_async_event(hba
,
6834 (CQE_ASYNC_t
*)&cq_entry
);
6836 emlxs_sli4_process_mbox_event(hba
,
6837 (CQE_MBOX_t
*)&cq_entry
);
6839 } else { /* EMLXS_CQ_TYPE_GROUP2 */
6840 switch (cq_entry
.cqCmplEntry
.Code
) {
6841 case CQE_TYPE_WQ_COMPLETION
:
6842 if (cq_entry
.cqCmplEntry
.RequestTag
<
6844 emlxs_sli4_process_wqe_cmpl(hba
, cq
,
6845 (CQE_CmplWQ_t
*)&cq_entry
);
6847 emlxs_sli4_process_oor_wqe_cmpl(hba
, cq
,
6848 (CQE_CmplWQ_t
*)&cq_entry
);
6851 case CQE_TYPE_RELEASE_WQE
:
6852 emlxs_sli4_process_release_wqe(hba
, cq
,
6853 (CQE_RelWQ_t
*)&cq_entry
);
6855 case CQE_TYPE_UNSOL_RCV
:
6856 case CQE_TYPE_UNSOL_RCV_V1
:
6857 emlxs_sli4_process_unsol_rcv(hba
, cq
,
6858 (CQE_UnsolRcv_t
*)&cq_entry
);
6860 case CQE_TYPE_XRI_ABORTED
:
6861 emlxs_sli4_process_xri_aborted(hba
, cq
,
6862 (CQE_XRI_Abort_t
*)&cq_entry
);
6865 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6866 "Invalid CQ entry %d: %08x %08x %08x %08x",
6867 cq_entry
.cqCmplEntry
.Code
, cq_entry
.word
[0],
6868 cq_entry
.word
[1], cq_entry
.word
[2],
6874 mutex_enter(&EMLXS_PORT_LOCK
);
6877 /* Number of times this routine gets called for this CQ */
6880 /* num_entries is the number of CQEs we process in this specific CQ */
6881 cq
->num_proc
+= num_entries
;
6882 if (cq
->max_proc
< num_entries
)
6883 cq
->max_proc
= num_entries
;
6886 cqdb
|= CQ_DB_REARM
;
6887 if (num_entries
!= 0) {
6888 cqdb
|= ((num_entries
<< CQ_DB_POP_SHIFT
) & CQ_DB_POP_MASK
);
6891 #ifdef DEBUG_FASTPATH
6892 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6893 "CQE: CLEAR cqdb=%08x: pops=%d", cqdb
, num_entries
);
6894 #endif /* DEBUG_FASTPATH */
6896 emlxs_sli4_write_cqdb(hba
, cqdb
);
6898 /* EMLXS_PORT_LOCK must be held when exiting this routine */
6900 } /* emlxs_sli4_process_cq() */
6905 emlxs_sli4_process_eq(emlxs_hba_t
*hba
, EQ_DESC_t
*eq
)
6907 emlxs_port_t
*port
= &PPORT
;
6914 int num_entries
= 0;
6917 /* EMLXS_PORT_LOCK must be held when entering this routine */
6919 hba
->intr_busy_cnt
++;
6921 ptr
= eq
->addr
.virt
;
6922 ptr
+= eq
->host_index
;
6924 offset
= (off_t
)((uint64_t)((unsigned long)
6926 (uint64_t)((unsigned long)
6927 hba
->sli
.sli4
.slim2
.virt
));
6929 EMLXS_MPDATA_SYNC(eq
->addr
.dma_handle
, offset
,
6930 4096, DDI_DMA_SYNC_FORKERNEL
);
6934 eqe
.word
= BE_SWAP32(eqe
.word
);
6936 if (!(eqe
.word
& EQE_VALID
)) {
6940 #ifdef DEBUG_FASTPATH
6941 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6942 "EQE00: %08x", eqe
.word
);
6943 #endif /* DEBUG_FASTPATH */
6948 if (eq
->host_index
>= eq
->max_index
) {
6950 ptr
= eq
->addr
.virt
;
6955 cqi
= emlxs_sli4_cqid_to_index(hba
, eqe
.entry
.CQId
);
6957 /* Verify CQ index */
6958 if (cqi
== 0xffff) {
6959 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
6960 "EQE: Invalid CQid: %d. Dropping...",
6965 #ifdef DEBUG_FASTPATH
6966 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6967 "EQE: CQIndex:%x cqid:%x", cqi
, eqe
.entry
.CQId
);
6968 #endif /* DEBUG_FASTPATH */
6970 emlxs_sli4_process_cq(hba
, &hba
->sli
.sli4
.cq
[cqi
]);
6973 /* Number of times the ISR for this EQ gets called */
6976 /* num_entries is the number of EQEs we process in this specific ISR */
6977 eq
->num_proc
+= num_entries
;
6978 if (eq
->max_proc
< num_entries
) {
6979 eq
->max_proc
= num_entries
;
6983 eqdb
|= (EQ_DB_CLEAR
| EQ_DB_EVENT
| EQ_DB_REARM
);
6985 #ifdef DEBUG_FASTPATH
6986 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
6987 "EQE: CLEAR eqdb=%08x pops=%d", eqdb
, num_entries
);
6988 #endif /* DEBUG_FASTPATH */
6990 if (num_entries
!= 0) {
6991 eqdb
|= ((num_entries
<< EQ_DB_POP_SHIFT
) & EQ_DB_POP_MASK
);
6992 for (i
= 0; i
< hba
->chan_count
; i
++) {
6994 if (cp
->chan_flag
& EMLXS_NEEDS_TRIGGER
) {
6995 cp
->chan_flag
&= ~EMLXS_NEEDS_TRIGGER
;
6996 emlxs_thread_trigger2(&cp
->intr_thread
,
6997 emlxs_proc_channel
, cp
);
7002 emlxs_sli4_write_cqdb(hba
, eqdb
);
7004 /* EMLXS_PORT_LOCK must be held when exiting this routine */
7006 hba
->intr_busy_cnt
--;
7008 } /* emlxs_sli4_process_eq() */
7014 emlxs_sli4_msi_intr(char *arg1
, char *arg2
)
7016 emlxs_hba_t
*hba
= (emlxs_hba_t
*)arg1
;
7017 #ifdef DEBUG_FASTPATH
7018 emlxs_port_t
*port
= &PPORT
;
7019 #endif /* DEBUG_FASTPATH */
7023 #ifdef DEBUG_FASTPATH
7024 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7025 "msiINTR arg1:%p arg2:%p", arg1
, arg2
);
7026 #endif /* DEBUG_FASTPATH */
7028 /* Check for legacy interrupt handling */
7029 if (hba
->intr_type
== DDI_INTR_TYPE_FIXED
) {
7030 rc
= emlxs_sli4_intx_intr(arg1
);
7034 /* Get MSI message id */
7035 msgid
= (uint16_t)((unsigned long)arg2
);
7037 /* Validate the message id */
7038 if (msgid
>= hba
->intr_count
) {
7041 mutex_enter(&EMLXS_PORT_LOCK
);
7043 if ((hba
->state
== FC_KILLED
) || (hba
->flag
& FC_OFFLINE_MODE
)) {
7044 mutex_exit(&EMLXS_PORT_LOCK
);
7045 return (DDI_INTR_UNCLAIMED
);
7048 /* The eq[] index == the MSI vector number */
7049 emlxs_sli4_process_eq(hba
, &hba
->sli
.sli4
.eq
[msgid
]);
7051 mutex_exit(&EMLXS_PORT_LOCK
);
7052 return (DDI_INTR_CLAIMED
);
7054 } /* emlxs_sli4_msi_intr() */
7055 #endif /* MSI_SUPPORT */
7060 emlxs_sli4_intx_intr(char *arg
)
7062 emlxs_hba_t
*hba
= (emlxs_hba_t
*)arg
;
7063 #ifdef DEBUG_FASTPATH
7064 emlxs_port_t
*port
= &PPORT
;
7065 #endif /* DEBUG_FASTPATH */
7067 #ifdef DEBUG_FASTPATH
7068 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7069 "intxINTR arg:%p", arg
);
7070 #endif /* DEBUG_FASTPATH */
7072 mutex_enter(&EMLXS_PORT_LOCK
);
7074 if ((hba
->state
== FC_KILLED
) || (hba
->flag
& FC_OFFLINE_MODE
)) {
7075 mutex_exit(&EMLXS_PORT_LOCK
);
7076 return (DDI_INTR_UNCLAIMED
);
7079 emlxs_sli4_process_eq(hba
, &hba
->sli
.sli4
.eq
[0]);
7081 mutex_exit(&EMLXS_PORT_LOCK
);
7082 return (DDI_INTR_CLAIMED
);
7083 } /* emlxs_sli4_intx_intr() */
7087 emlxs_sli4_hba_kill(emlxs_hba_t
*hba
)
7089 emlxs_port_t
*port
= &PPORT
;
7092 mutex_enter(&EMLXS_PORT_LOCK
);
7093 if (hba
->flag
& FC_INTERLOCKED
) {
7094 EMLXS_STATE_CHANGE_LOCKED(hba
, FC_KILLED
);
7096 mutex_exit(&EMLXS_PORT_LOCK
);
7102 while (j
++ < 10000) {
7103 if ((hba
->mbox_queue_flag
== 0) &&
7104 (hba
->intr_busy_cnt
== 0)) {
7108 mutex_exit(&EMLXS_PORT_LOCK
);
7110 mutex_enter(&EMLXS_PORT_LOCK
);
7113 if ((hba
->mbox_queue_flag
!= 0) || (hba
->intr_busy_cnt
> 0)) {
7114 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
7115 "Board kill failed. Adapter busy, %d, %d.",
7116 hba
->mbox_queue_flag
, hba
->intr_busy_cnt
);
7117 mutex_exit(&EMLXS_PORT_LOCK
);
7121 hba
->flag
|= FC_INTERLOCKED
;
7123 EMLXS_STATE_CHANGE_LOCKED(hba
, FC_KILLED
);
7125 mutex_exit(&EMLXS_PORT_LOCK
);
7127 } /* emlxs_sli4_hba_kill() */
7131 emlxs_sli4_hba_reset_all(emlxs_hba_t
*hba
, uint32_t flag
)
7133 emlxs_port_t
*port
= &PPORT
;
7136 mutex_enter(&EMLXS_PORT_LOCK
);
7138 if ((hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) != SLI_INTF_IF_TYPE_2
) {
7139 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_debug_msg
,
7140 "Reset All failed. Invalid Operation.");
7141 mutex_exit(&EMLXS_PORT_LOCK
);
7145 /* Issue a Firmware Reset All Request */
7147 value
= SLI_PHYDEV_FRST
| SLI_PHYDEV_FRL_ALL
| SLI_PHYDEV_DD
;
7149 value
= SLI_PHYDEV_FRST
| SLI_PHYDEV_FRL_ALL
;
7152 ddi_put32(hba
->sli
.sli4
.bar0_acc_handle
,
7153 hba
->sli
.sli4
.PHYSDEV_reg_addr
, value
);
7155 mutex_exit(&EMLXS_PORT_LOCK
);
7157 } /* emlxs_sli4_hba_reset_all() */
7161 emlxs_sli4_enable_intr(emlxs_hba_t
*hba
)
7163 emlxs_config_t
*cfg
= &CFG
;
7168 hba
->sli
.sli4
.flag
|= EMLXS_SLI4_INTR_ENABLED
;
7170 num_cq
= (hba
->intr_count
* cfg
[CFG_NUM_WQ
].current
) +
7174 for (i
= 0; i
< num_cq
; i
++) {
7175 data
= hba
->sli
.sli4
.cq
[i
].qid
;
7176 data
|= CQ_DB_REARM
;
7177 emlxs_sli4_write_cqdb(hba
, data
);
7179 for (i
= 0; i
< hba
->intr_count
; i
++) {
7180 data
= hba
->sli
.sli4
.eq
[i
].qid
;
7181 data
|= (EQ_DB_REARM
| EQ_DB_EVENT
);
7182 emlxs_sli4_write_cqdb(hba
, data
);
7184 } /* emlxs_sli4_enable_intr() */
7188 emlxs_sli4_disable_intr(emlxs_hba_t
*hba
, uint32_t att
)
7194 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_INTR_ENABLED
;
7196 /* Short of reset, we cannot disable interrupts */
7197 } /* emlxs_sli4_disable_intr() */
7201 emlxs_sli4_resource_free(emlxs_hba_t
*hba
)
7203 emlxs_port_t
*port
= &PPORT
;
7204 MBUF_INFO
*buf_info
;
7207 buf_info
= &hba
->sli
.sli4
.slim2
;
7208 if (buf_info
->virt
== 0) {
7213 emlxs_fcf_fini(hba
);
7215 buf_info
= &hba
->sli
.sli4
.HeaderTmplate
;
7216 if (buf_info
->virt
) {
7217 bzero(buf_info
, sizeof (MBUF_INFO
));
7220 if (hba
->sli
.sli4
.XRIp
) {
7221 if ((hba
->sli
.sli4
.XRIinuse_f
!=
7222 (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
) ||
7223 (hba
->sli
.sli4
.XRIinuse_b
!=
7224 (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
)) {
7225 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_debug_msg
,
7226 "XRIs in use during free!: %p %p != %p\n",
7227 hba
->sli
.sli4
.XRIinuse_f
,
7228 hba
->sli
.sli4
.XRIinuse_b
,
7229 &hba
->sli
.sli4
.XRIinuse_f
);
7231 kmem_free(hba
->sli
.sli4
.XRIp
,
7232 (sizeof (XRIobj_t
) * hba
->sli
.sli4
.XRICount
));
7233 hba
->sli
.sli4
.XRIp
= NULL
;
7235 hba
->sli
.sli4
.XRIfree_f
=
7236 (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7237 hba
->sli
.sli4
.XRIfree_b
=
7238 (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7239 hba
->sli
.sli4
.xrif_count
= 0;
7242 for (i
= 0; i
< hba
->intr_count
; i
++) {
7243 mutex_destroy(&hba
->sli
.sli4
.eq
[i
].lastwq_lock
);
7244 bzero(&hba
->sli
.sli4
.eq
[i
], sizeof (EQ_DESC_t
));
7245 hba
->sli
.sli4
.eq
[i
].qid
= 0xffff;
7247 for (i
= 0; i
< EMLXS_MAX_CQS
; i
++) {
7248 bzero(&hba
->sli
.sli4
.cq
[i
], sizeof (CQ_DESC_t
));
7249 hba
->sli
.sli4
.cq
[i
].qid
= 0xffff;
7251 for (i
= 0; i
< EMLXS_MAX_WQS
; i
++) {
7252 bzero(&hba
->sli
.sli4
.wq
[i
], sizeof (WQ_DESC_t
));
7253 hba
->sli
.sli4
.wq
[i
].qid
= 0xffff;
7255 for (i
= 0; i
< EMLXS_MAX_RXQS
; i
++) {
7256 mutex_destroy(&hba
->sli
.sli4
.rxq
[i
].lock
);
7257 bzero(&hba
->sli
.sli4
.rxq
[i
], sizeof (RXQ_DESC_t
));
7259 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
7260 mutex_destroy(&hba
->sli
.sli4
.rq
[i
].lock
);
7261 bzero(&hba
->sli
.sli4
.rq
[i
], sizeof (RQ_DESC_t
));
7262 hba
->sli
.sli4
.rq
[i
].qid
= 0xffff;
7266 bzero(&hba
->sli
.sli4
.mq
, sizeof (MQ_DESC_t
));
7268 buf_info
= &hba
->sli
.sli4
.slim2
;
7269 if (buf_info
->virt
) {
7270 buf_info
->flags
= FC_MBUF_DMA
;
7271 emlxs_mem_free(hba
, buf_info
);
7272 bzero(buf_info
, sizeof (MBUF_INFO
));
7275 } /* emlxs_sli4_resource_free() */
7279 emlxs_sli4_resource_alloc(emlxs_hba_t
*hba
)
7281 emlxs_port_t
*port
= &PPORT
;
7282 emlxs_config_t
*cfg
= &CFG
;
7283 MBUF_INFO
*buf_info
;
7304 uint32_t hddr_size
= 0;
7308 buf_info
= &hba
->sli
.sli4
.slim2
;
7309 if (buf_info
->virt
) {
7310 /* Already allocated */
7314 emlxs_fcf_init(hba
);
7316 switch (hba
->sli
.sli4
.param
.CQV
) {
7318 cq_depth
= CQ_DEPTH
;
7322 cq_depth
= CQ_DEPTH_V2
;
7325 cq_size
= (cq_depth
* CQE_SIZE
);
7327 /* EQs - 1 per Interrupt vector */
7328 num_eq
= hba
->intr_count
;
7330 /* CQs - number of WQs + 1 for RQs + 1 for mbox/async events */
7331 num_wq
= cfg
[CFG_NUM_WQ
].current
* num_eq
;
7333 /* Calculate total dmable memory we need */
7334 /* WARNING: make sure each section is aligned on 4K boundary */
7337 count
+= num_eq
* 4096;
7340 count
+= (num_wq
+ EMLXS_CQ_OFFSET_WQ
) * cq_size
;
7343 count
+= num_wq
* (4096 * EMLXS_NUM_WQ_PAGES
);
7346 count
+= EMLXS_MAX_MQS
* 4096;
7349 count
+= EMLXS_MAX_RQS
* 4096;
7352 count
+= RQB_COUNT
* (RQB_DATA_SIZE
+ RQB_HEADER_SIZE
);
7353 count
+= (4096 - (count
%4096)); /* Ensure 4K alignment */
7356 count
+= hba
->sli
.sli4
.XRIExtSize
* hba
->sli
.sli4
.mem_sgl_size
;
7357 count
+= (4096 - (count
%4096)); /* Ensure 4K alignment */
7359 /* RPI Header Templates */
7360 if (hba
->sli
.sli4
.param
.HDRR
) {
7361 /* Bytes per extent */
7362 j
= hba
->sli
.sli4
.RPIExtSize
* sizeof (RPIHdrTmplate_t
);
7364 /* Pages required per extent (page == 4096 bytes) */
7365 k
= (j
/4096) + ((j
%4096)? 1:0);
7368 hddr_size
= (k
* hba
->sli
.sli4
.RPIExtCount
* 4096);
7373 /* Allocate slim2 for SLI4 */
7374 buf_info
= &hba
->sli
.sli4
.slim2
;
7375 buf_info
->size
= count
;
7376 buf_info
->flags
= FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7377 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7379 (void) emlxs_mem_alloc(hba
, buf_info
);
7381 if (buf_info
->virt
== NULL
) {
7382 EMLXS_MSGF(EMLXS_CONTEXT
,
7383 &emlxs_init_failed_msg
,
7384 "Unable to allocate internal memory for SLI4: %d",
7388 bzero(buf_info
->virt
, buf_info
->size
);
7389 EMLXS_MPDATA_SYNC(buf_info
->dma_handle
, 0,
7390 buf_info
->size
, DDI_DMA_SYNC_FORDEV
);
7392 /* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
7393 data_handle
= buf_info
->data_handle
;
7394 dma_handle
= buf_info
->dma_handle
;
7395 phys
= buf_info
->phys
;
7396 virt
= (char *)buf_info
->virt
;
7398 /* Allocate space for queues */
7402 for (i
= 0; i
< num_eq
; i
++) {
7403 bzero(&hba
->sli
.sli4
.eq
[i
], sizeof (EQ_DESC_t
));
7405 buf_info
= &hba
->sli
.sli4
.eq
[i
].addr
;
7406 buf_info
->size
= size
;
7408 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7409 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7410 buf_info
->phys
= phys
;
7411 buf_info
->virt
= (void *)virt
;
7412 buf_info
->data_handle
= data_handle
;
7413 buf_info
->dma_handle
= dma_handle
;
7418 hba
->sli
.sli4
.eq
[i
].max_index
= EQ_DEPTH
;
7419 hba
->sli
.sli4
.eq
[i
].qid
= 0xffff;
7421 mutex_init(&hba
->sli
.sli4
.eq
[i
].lastwq_lock
, NULL
,
7422 MUTEX_DRIVER
, NULL
);
7427 for (i
= 0; i
< (num_wq
+ EMLXS_CQ_OFFSET_WQ
); i
++) {
7428 bzero(&hba
->sli
.sli4
.cq
[i
], sizeof (CQ_DESC_t
));
7430 buf_info
= &hba
->sli
.sli4
.cq
[i
].addr
;
7431 buf_info
->size
= cq_size
;
7433 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7434 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7435 buf_info
->phys
= phys
;
7436 buf_info
->virt
= (void *)virt
;
7437 buf_info
->data_handle
= data_handle
;
7438 buf_info
->dma_handle
= dma_handle
;
7443 hba
->sli
.sli4
.cq
[i
].max_index
= cq_depth
;
7444 hba
->sli
.sli4
.cq
[i
].qid
= 0xffff;
7449 size
= 4096 * EMLXS_NUM_WQ_PAGES
;
7450 for (i
= 0; i
< num_wq
; i
++) {
7451 bzero(&hba
->sli
.sli4
.wq
[i
], sizeof (WQ_DESC_t
));
7453 buf_info
= &hba
->sli
.sli4
.wq
[i
].addr
;
7454 buf_info
->size
= size
;
7456 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7457 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7458 buf_info
->phys
= phys
;
7459 buf_info
->virt
= (void *)virt
;
7460 buf_info
->data_handle
= data_handle
;
7461 buf_info
->dma_handle
= dma_handle
;
7466 hba
->sli
.sli4
.wq
[i
].max_index
= WQ_DEPTH
;
7467 hba
->sli
.sli4
.wq
[i
].release_depth
= WQE_RELEASE_DEPTH
;
7468 hba
->sli
.sli4
.wq
[i
].qid
= 0xFFFF;
7474 bzero(&hba
->sli
.sli4
.mq
, sizeof (MQ_DESC_t
));
7476 buf_info
= &hba
->sli
.sli4
.mq
.addr
;
7477 buf_info
->size
= size
;
7479 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7480 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7481 buf_info
->phys
= phys
;
7482 buf_info
->virt
= (void *)virt
;
7483 buf_info
->data_handle
= data_handle
;
7484 buf_info
->dma_handle
= dma_handle
;
7489 hba
->sli
.sli4
.mq
.max_index
= MQ_DEPTH
;
7493 for (i
= 0; i
< EMLXS_MAX_RXQS
; i
++) {
7494 bzero(&hba
->sli
.sli4
.rxq
[i
], sizeof (RXQ_DESC_t
));
7496 mutex_init(&hba
->sli
.sli4
.rxq
[i
].lock
, NULL
, MUTEX_DRIVER
,
7503 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
7504 bzero(&hba
->sli
.sli4
.rq
[i
], sizeof (RQ_DESC_t
));
7506 buf_info
= &hba
->sli
.sli4
.rq
[i
].addr
;
7507 buf_info
->size
= size
;
7509 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7510 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7511 buf_info
->phys
= phys
;
7512 buf_info
->virt
= (void *)virt
;
7513 buf_info
->data_handle
= data_handle
;
7514 buf_info
->dma_handle
= dma_handle
;
7519 hba
->sli
.sli4
.rq
[i
].max_index
= RQ_DEPTH
;
7520 hba
->sli
.sli4
.rq
[i
].qid
= 0xFFFF;
7522 mutex_init(&hba
->sli
.sli4
.rq
[i
].lock
, NULL
, MUTEX_DRIVER
, NULL
);
7527 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
7528 size
= (i
& 0x1) ? RQB_DATA_SIZE
: RQB_HEADER_SIZE
;
7532 /* Initialize the RQEs */
7533 rqe
= (RQE_t
*)hba
->sli
.sli4
.rq
[i
].addr
.virt
;
7534 for (j
= 0; j
< (RQ_DEPTH
/RQB_COUNT
); j
++) {
7537 for (k
= 0; k
< RQB_COUNT
; k
++) {
7538 word
= PADDR_HI(phys
);
7539 rqe
->AddrHi
= BE_SWAP32(word
);
7541 word
= PADDR_LO(phys
);
7542 rqe
->AddrLo
= BE_SWAP32(word
);
7544 rqb
= &hba
->sli
.sli4
.rq
[i
].
7545 rqb
[k
+ (j
* RQB_COUNT
)];
7547 rqb
->flags
= FC_MBUF_DMA
|
7548 FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7549 rqb
->align
= ddi_ptob(hba
->dip
, 1L);
7551 rqb
->virt
= (void *)virt
;
7552 rqb
->data_handle
= data_handle
;
7553 rqb
->dma_handle
= dma_handle
;
7558 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7559 "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7560 i
, j
, k
, mp
, mp
->tag
);
7561 #endif /* DEBUG_RQE */
7567 offset
= (off_t
)((uint64_t)((unsigned long)
7568 hba
->sli
.sli4
.rq
[i
].addr
.virt
) -
7569 (uint64_t)((unsigned long)
7570 hba
->sli
.sli4
.slim2
.virt
));
7572 /* Sync the RQ buffer list */
7573 EMLXS_MPDATA_SYNC(hba
->sli
.sli4
.rq
[i
].addr
.dma_handle
, offset
,
7574 hba
->sli
.sli4
.rq
[i
].addr
.size
, DDI_DMA_SYNC_FORDEV
);
7578 align
= (4096 - (phys
%4096));
7583 /* Initialize double linked lists */
7584 hba
->sli
.sli4
.XRIinuse_f
=
7585 (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
;
7586 hba
->sli
.sli4
.XRIinuse_b
=
7587 (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
;
7588 hba
->sli
.sli4
.xria_count
= 0;
7590 hba
->sli
.sli4
.XRIfree_f
=
7591 (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7592 hba
->sli
.sli4
.XRIfree_b
=
7593 (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7594 hba
->sli
.sli4
.xria_count
= 0;
7596 hba
->sli
.sli4
.XRIp
= (XRIobj_t
*)kmem_zalloc(
7597 (sizeof (XRIobj_t
) * hba
->sli
.sli4
.XRICount
), KM_SLEEP
);
7599 xrip
= hba
->sli
.sli4
.XRIp
;
7600 size
= hba
->sli
.sli4
.mem_sgl_size
;
7602 for (i
= 0; i
< hba
->sli
.sli4
.XRICount
; i
++) {
7603 xrip
->XRI
= emlxs_sli4_index_to_xri(hba
, i
);
7605 /* We don't use XRI==0, since it also represents an */
7606 /* uninitialized exchange */
7607 if (xrip
->XRI
== 0) {
7612 xrip
->iotag
= iotag
++;
7614 (hba
->sli
.sli4
.mem_sgl_size
/ sizeof (ULP_SGE64
));
7616 /* Add xrip to end of free list */
7617 xrip
->_b
= hba
->sli
.sli4
.XRIfree_b
;
7618 hba
->sli
.sli4
.XRIfree_b
->_f
= xrip
;
7619 xrip
->_f
= (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7620 hba
->sli
.sli4
.XRIfree_b
= xrip
;
7621 hba
->sli
.sli4
.xrif_count
++;
7623 /* Allocate SGL for this xrip */
7624 buf_info
= &xrip
->SGList
;
7625 buf_info
->size
= size
;
7627 FC_MBUF_DMA
| FC_MBUF_SNGLSG
| FC_MBUF_DMA32
;
7628 buf_info
->align
= size
;
7629 buf_info
->phys
= phys
;
7630 buf_info
->virt
= (void *)virt
;
7631 buf_info
->data_handle
= data_handle
;
7632 buf_info
->dma_handle
= dma_handle
;
7641 align
= (4096 - (phys
%4096));
7645 /* RPI Header Templates */
7646 if (hba
->sli
.sli4
.param
.HDRR
) {
7647 buf_info
= &hba
->sli
.sli4
.HeaderTmplate
;
7648 bzero(buf_info
, sizeof (MBUF_INFO
));
7649 buf_info
->size
= hddr_size
;
7650 buf_info
->flags
= FC_MBUF_DMA
| FC_MBUF_DMA32
;
7651 buf_info
->align
= ddi_ptob(hba
->dip
, 1L);
7652 buf_info
->phys
= phys
;
7653 buf_info
->virt
= (void *)virt
;
7654 buf_info
->data_handle
= data_handle
;
7655 buf_info
->dma_handle
= dma_handle
;
7659 if (hba
->sli
.sli4
.slim2
.dma_handle
) {
7660 if (emlxs_fm_check_dma_handle(hba
,
7661 hba
->sli
.sli4
.slim2
.dma_handle
)
7663 EMLXS_MSGF(EMLXS_CONTEXT
,
7664 &emlxs_invalid_dma_handle_msg
,
7665 "sli4_resource_alloc: hdl=%p",
7666 hba
->sli
.sli4
.slim2
.dma_handle
);
7670 #endif /* FMA_SUPPORT */
7676 (void) emlxs_sli4_resource_free(hba
);
7679 } /* emlxs_sli4_resource_alloc */
7683 emlxs_sli4_zero_queue_stat(emlxs_hba_t
*hba
)
7687 emlxs_config_t
*cfg
= &CFG
;
7691 for (i
= 0; i
< hba
->intr_count
; i
++) {
7692 hba
->sli
.sli4
.eq
[i
].num_proc
= 0;
7693 hba
->sli
.sli4
.eq
[i
].max_proc
= 0;
7694 hba
->sli
.sli4
.eq
[i
].isr_count
= 0;
7696 num_wq
= cfg
[CFG_NUM_WQ
].current
* hba
->intr_count
;
7698 for (i
= 0; i
< (num_wq
+ EMLXS_CQ_OFFSET_WQ
); i
++) {
7699 hba
->sli
.sli4
.cq
[i
].num_proc
= 0;
7700 hba
->sli
.sli4
.cq
[i
].max_proc
= 0;
7701 hba
->sli
.sli4
.cq
[i
].isr_count
= 0;
7704 for (i
= 0; i
< num_wq
; i
++) {
7705 hba
->sli
.sli4
.wq
[i
].num_proc
= 0;
7706 hba
->sli
.sli4
.wq
[i
].num_busy
= 0;
7709 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
7710 hba
->sli
.sli4
.rq
[i
].num_proc
= 0;
7712 (void) drv_getparm(LBOLT
, &time
);
7713 hba
->sli
.sli4
.que_stat_timer
= (uint32_t)time
;
7715 } /* emlxs_sli4_zero_queue_stat */
7719 emlxs_sli4_reserve_xri(emlxs_port_t
*port
, RPIobj_t
*rpip
, uint32_t type
,
7722 emlxs_hba_t
*hba
= HBA
;
7726 mutex_enter(&EMLXS_FCTAB_LOCK
);
7728 xrip
= hba
->sli
.sli4
.XRIfree_f
;
7730 if (xrip
== (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
) {
7731 mutex_exit(&EMLXS_FCTAB_LOCK
);
7733 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
7734 "Unable to reserve XRI. type=%d",
7740 iotag
= xrip
->iotag
;
7743 ((hba
->fc_table
[iotag
] != NULL
) &&
7744 (hba
->fc_table
[iotag
] != STALE_PACKET
))) {
7746 * No more command slots available, retry later
7748 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
7749 "Adapter Busy. Unable to reserve iotag. type=%d",
7752 mutex_exit(&EMLXS_FCTAB_LOCK
);
7756 xrip
->state
= XRI_STATE_ALLOCATED
;
7758 xrip
->flag
= EMLXS_XRI_RESERVED
;
7762 xrip
->rx_id
= rx_id
;
7765 /* Take it off free list */
7766 (xrip
->_b
)->_f
= xrip
->_f
;
7767 (xrip
->_f
)->_b
= xrip
->_b
;
7770 hba
->sli
.sli4
.xrif_count
--;
7772 /* Add it to end of inuse list */
7773 xrip
->_b
= hba
->sli
.sli4
.XRIinuse_b
;
7774 hba
->sli
.sli4
.XRIinuse_b
->_f
= xrip
;
7775 xrip
->_f
= (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
;
7776 hba
->sli
.sli4
.XRIinuse_b
= xrip
;
7777 hba
->sli
.sli4
.xria_count
++;
7779 mutex_exit(&EMLXS_FCTAB_LOCK
);
7782 } /* emlxs_sli4_reserve_xri() */
7786 emlxs_sli4_unreserve_xri(emlxs_port_t
*port
, uint16_t xri
, uint32_t lock
)
7788 emlxs_hba_t
*hba
= HBA
;
7792 mutex_enter(&EMLXS_FCTAB_LOCK
);
7795 xrip
= emlxs_sli4_find_xri(port
, xri
);
7797 if (!xrip
|| xrip
->state
== XRI_STATE_FREE
) {
7799 mutex_exit(&EMLXS_FCTAB_LOCK
);
7802 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7803 "sli4_unreserve_xri:%d already freed.", xri
);
7807 /* Flush this unsolicited ct command */
7808 if (xrip
->type
== EMLXS_XRI_UNSOL_CT_TYPE
) {
7809 (void) emlxs_flush_ct_event(port
, xrip
->rx_id
);
7812 if (!(xrip
->flag
& EMLXS_XRI_RESERVED
)) {
7814 mutex_exit(&EMLXS_FCTAB_LOCK
);
7817 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7818 "sli4_unreserve_xri:%d in use. type=%d",
7819 xrip
->XRI
, xrip
->type
);
7824 (hba
->fc_table
[xrip
->iotag
] != NULL
) &&
7825 (hba
->fc_table
[xrip
->iotag
] != STALE_PACKET
)) {
7826 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_err_msg
,
7827 "sli4_unreserve_xri:%d sbp dropped:%p type=%d",
7828 xrip
->XRI
, hba
->fc_table
[xrip
->iotag
], xrip
->type
);
7830 hba
->fc_table
[xrip
->iotag
] = NULL
;
7834 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
7835 "sli4_unreserve_xri:%d unreserved. type=%d",
7836 xrip
->XRI
, xrip
->type
);
7838 xrip
->state
= XRI_STATE_FREE
;
7842 xrip
->rpip
->xri_count
--;
7846 if (xrip
->reserved_rpip
) {
7847 xrip
->reserved_rpip
->xri_count
--;
7848 xrip
->reserved_rpip
= NULL
;
7851 /* Take it off inuse list */
7852 (xrip
->_b
)->_f
= xrip
->_f
;
7853 (xrip
->_f
)->_b
= xrip
->_b
;
7856 hba
->sli
.sli4
.xria_count
--;
7858 /* Add it to end of free list */
7859 xrip
->_b
= hba
->sli
.sli4
.XRIfree_b
;
7860 hba
->sli
.sli4
.XRIfree_b
->_f
= xrip
;
7861 xrip
->_f
= (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
7862 hba
->sli
.sli4
.XRIfree_b
= xrip
;
7863 hba
->sli
.sli4
.xrif_count
++;
7866 mutex_exit(&EMLXS_FCTAB_LOCK
);
7871 } /* emlxs_sli4_unreserve_xri() */
7875 emlxs_sli4_register_xri(emlxs_port_t
*port
, emlxs_buf_t
*sbp
, uint16_t xri
,
7878 emlxs_hba_t
*hba
= HBA
;
7884 mutex_enter(&EMLXS_FCTAB_LOCK
);
7888 xrip
= emlxs_sli4_find_xri(port
, xri
);
7891 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
7892 "sli4_register_xri:%d XRI not found.", xri
);
7894 mutex_exit(&EMLXS_FCTAB_LOCK
);
7899 if ((xrip
->state
== XRI_STATE_FREE
) ||
7900 !(xrip
->flag
& EMLXS_XRI_RESERVED
)) {
7902 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
7903 "sli4_register_xri:%d Invalid XRI. xrip=%p "
7905 xrip
->XRI
, xrip
, xrip
->state
, xrip
->flag
);
7907 mutex_exit(&EMLXS_FCTAB_LOCK
);
7911 iotag
= xrip
->iotag
;
7914 ((hba
->fc_table
[iotag
] != NULL
) &&
7915 (hba
->fc_table
[iotag
] != STALE_PACKET
))) {
7917 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
7918 "sli4_register_xri:%d Invalid fc_table entry. "
7919 "iotag=%d entry=%p",
7920 xrip
->XRI
, iotag
, hba
->fc_table
[iotag
]);
7922 mutex_exit(&EMLXS_FCTAB_LOCK
);
7926 hba
->fc_table
[iotag
] = sbp
;
7932 xrip
->flag
&= ~EMLXS_XRI_RESERVED
;
7935 /* If we did not have a registered RPI when we reserved */
7936 /* this exchange, check again now. */
7937 if (xrip
->rpip
&& (xrip
->rpip
->RPI
== FABRIC_RPI
)) {
7938 node
= emlxs_node_find_did(port
, did
, 1);
7939 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
7941 if (rpip
&& (rpip
->RPI
!= FABRIC_RPI
)) {
7942 /* Move the XRI to the new RPI */
7943 xrip
->rpip
->xri_count
--;
7949 mutex_exit(&EMLXS_FCTAB_LOCK
);
7953 } /* emlxs_sli4_register_xri() */
7956 /* Performs both reserve and register functions for XRI */
7958 emlxs_sli4_alloc_xri(emlxs_port_t
*port
, emlxs_buf_t
*sbp
, RPIobj_t
*rpip
,
7961 emlxs_hba_t
*hba
= HBA
;
7965 mutex_enter(&EMLXS_FCTAB_LOCK
);
7967 xrip
= hba
->sli
.sli4
.XRIfree_f
;
7969 if (xrip
== (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
) {
7970 mutex_exit(&EMLXS_FCTAB_LOCK
);
7975 /* Get the iotag by registering the packet */
7976 iotag
= xrip
->iotag
;
7979 ((hba
->fc_table
[iotag
] != NULL
) &&
7980 (hba
->fc_table
[iotag
] != STALE_PACKET
))) {
7982 * No more command slots available, retry later
7984 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_pkt_trans_msg
,
7985 "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
7986 iotag
, hba
->fc_table
[iotag
], type
);
7988 mutex_exit(&EMLXS_FCTAB_LOCK
);
7992 hba
->fc_table
[iotag
] = sbp
;
7998 xrip
->state
= XRI_STATE_ALLOCATED
;
8006 /* Take it off free list */
8007 (xrip
->_b
)->_f
= xrip
->_f
;
8008 (xrip
->_f
)->_b
= xrip
->_b
;
8011 hba
->sli
.sli4
.xrif_count
--;
8013 /* Add it to end of inuse list */
8014 xrip
->_b
= hba
->sli
.sli4
.XRIinuse_b
;
8015 hba
->sli
.sli4
.XRIinuse_b
->_f
= xrip
;
8016 xrip
->_f
= (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
;
8017 hba
->sli
.sli4
.XRIinuse_b
= xrip
;
8018 hba
->sli
.sli4
.xria_count
++;
8020 mutex_exit(&EMLXS_FCTAB_LOCK
);
8024 } /* emlxs_sli4_alloc_xri() */
8027 /* EMLXS_FCTAB_LOCK must be held to enter */
8029 emlxs_sli4_find_xri(emlxs_port_t
*port
, uint16_t xri
)
8031 emlxs_hba_t
*hba
= HBA
;
8034 xrip
= (XRIobj_t
*)hba
->sli
.sli4
.XRIinuse_f
;
8035 while (xrip
!= (XRIobj_t
*)&hba
->sli
.sli4
.XRIinuse_f
) {
8036 if ((xrip
->state
>= XRI_STATE_ALLOCATED
) &&
8037 (xrip
->XRI
== xri
)) {
8043 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8044 "Unable to find XRI x%x", xri
);
8048 } /* emlxs_sli4_find_xri() */
8054 emlxs_sli4_free_xri(emlxs_port_t
*port
, emlxs_buf_t
*sbp
, XRIobj_t
*xrip
,
8057 emlxs_hba_t
*hba
= HBA
;
8060 mutex_enter(&EMLXS_FCTAB_LOCK
);
8064 if (xrip
->state
== XRI_STATE_FREE
) {
8066 mutex_exit(&EMLXS_FCTAB_LOCK
);
8068 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8069 "Free XRI:%x, Already freed. type=%d",
8070 xrip
->XRI
, xrip
->type
);
8074 if (xrip
->type
== EMLXS_XRI_UNSOL_CT_TYPE
) {
8075 (void) emlxs_flush_ct_event(port
, xrip
->rx_id
);
8079 (hba
->fc_table
[xrip
->iotag
] != NULL
) &&
8080 (hba
->fc_table
[xrip
->iotag
] != STALE_PACKET
)) {
8081 hba
->fc_table
[xrip
->iotag
] = NULL
;
8085 xrip
->state
= XRI_STATE_FREE
;
8090 xrip
->rpip
->xri_count
--;
8094 if (xrip
->reserved_rpip
) {
8095 xrip
->reserved_rpip
->xri_count
--;
8096 xrip
->reserved_rpip
= NULL
;
8099 /* Take it off inuse list */
8100 (xrip
->_b
)->_f
= xrip
->_f
;
8101 (xrip
->_f
)->_b
= xrip
->_b
;
8104 hba
->sli
.sli4
.xria_count
--;
8106 /* Add it to end of free list */
8107 xrip
->_b
= hba
->sli
.sli4
.XRIfree_b
;
8108 hba
->sli
.sli4
.XRIfree_b
->_f
= xrip
;
8109 xrip
->_f
= (XRIobj_t
*)&hba
->sli
.sli4
.XRIfree_f
;
8110 hba
->sli
.sli4
.XRIfree_b
= xrip
;
8111 hba
->sli
.sli4
.xrif_count
++;
8115 if (!(sbp
->pkt_flags
& PACKET_VALID
) ||
8117 (PACKET_ULP_OWNED
|PACKET_COMPLETED
|PACKET_IN_COMPLETION
))) {
8119 mutex_exit(&EMLXS_FCTAB_LOCK
);
8121 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8122 "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
8123 sbp
, sbp
->pkt_flags
, ((xrip
)? xrip
->XRI
:0));
8127 if (xrip
&& (xrip
->iotag
!= sbp
->iotag
)) {
8128 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_debug_msg
,
8129 "sbp/iotag mismatch %p iotag:%d %d", sbp
,
8130 sbp
->iotag
, xrip
->iotag
);
8134 if (sbp
== hba
->fc_table
[sbp
->iotag
]) {
8135 hba
->fc_table
[sbp
->iotag
] = NULL
;
8139 /* Exchange is still reserved */
8140 sbp
->xrip
->flag
|= EMLXS_XRI_RESERVED
;
8151 mutex_exit(&EMLXS_FCTAB_LOCK
);
8154 /* Clean up the sbp */
8155 mutex_enter(&sbp
->mtx
);
8157 if (sbp
->pkt_flags
& PACKET_IN_TXQ
) {
8158 sbp
->pkt_flags
&= ~PACKET_IN_TXQ
;
8159 hba
->channel_tx_count
--;
8162 if (sbp
->pkt_flags
& PACKET_IN_CHIPQ
) {
8163 sbp
->pkt_flags
&= ~PACKET_IN_CHIPQ
;
8166 mutex_exit(&sbp
->mtx
);
8169 mutex_exit(&EMLXS_FCTAB_LOCK
);
8173 } /* emlxs_sli4_free_xri() */
8177 emlxs_sli4_post_sgl_pages(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
)
8179 MAILBOX4
*mb
= (MAILBOX4
*)mbq
;
8180 emlxs_port_t
*port
= &PPORT
;
8183 mbox_req_hdr_t
*hdr_req
;
8189 IOCTL_FCOE_CFG_POST_SGL_PAGES
*post_sgl
;
8191 bzero((void *) mb
, MAILBOX_CMD_SLI4_BSIZE
);
8193 mbq
->mbox_cmpl
= NULL
;
8195 if ((mp
= emlxs_mem_buf_alloc(hba
, EMLXS_MAX_NONEMBED_SIZE
)) == 0) {
8196 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8197 "Unable to POST_SGL. Mailbox cmd=%x ",
8201 mbq
->nonembed
= (void *)mp
;
8204 * Signifies a non embedded command
8206 mb
->un
.varSLIConfig
.be
.embedded
= 0;
8207 mb
->mbxCommand
= MBX_SLI_CONFIG
;
8208 mb
->mbxOwner
= OWN_HOST
;
8210 hdr_req
= (mbox_req_hdr_t
*)mp
->virt
;
8212 (IOCTL_FCOE_CFG_POST_SGL_PAGES
*)(hdr_req
+ 1);
8214 xrip
= hba
->sli
.sli4
.XRIp
;
8216 /* For each extent */
8217 for (j
= 0; j
< hba
->sli
.sli4
.XRIExtCount
; j
++) {
8218 cnt
= hba
->sli
.sli4
.XRIExtSize
;
8220 if (xrip
->XRI
== 0) {
8226 bzero((void *) hdr_req
, mp
->size
);
8227 size
= mp
->size
- IOCTL_HEADER_SZ
;
8229 mb
->un
.varSLIConfig
.be
.payload_length
=
8231 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.subsystem
=
8232 IOCTL_SUBSYSTEM_FCOE
;
8233 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.opcode
=
8234 FCOE_OPCODE_CFG_POST_SGL_PAGES
;
8235 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.timeout
= 0;
8236 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.req_length
= size
;
8238 hdr_req
->subsystem
= IOCTL_SUBSYSTEM_FCOE
;
8239 hdr_req
->opcode
= FCOE_OPCODE_CFG_POST_SGL_PAGES
;
8240 hdr_req
->timeout
= 0;
8241 hdr_req
->req_length
= size
;
8243 post_sgl
->params
.request
.xri_count
= 0;
8244 post_sgl
->params
.request
.xri_start
= xrip
->XRI
;
8247 sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES
)) /
8248 sizeof (FCOE_SGL_PAGES
);
8250 for (i
= 0; (i
< xri_cnt
) && cnt
; i
++) {
8251 post_sgl
->params
.request
.xri_count
++;
8252 post_sgl
->params
.request
.pages
[i
].\
8254 PADDR_LO(xrip
->SGList
.phys
);
8255 post_sgl
->params
.request
.pages
[i
].\
8256 sgl_page0
.addrHigh
=
8257 PADDR_HI(xrip
->SGList
.phys
);
8263 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8265 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8266 "Unable to POST_SGL. Mailbox cmd=%x "
8267 "status=%x XRI cnt:%d start:%d",
8268 mb
->mbxCommand
, mb
->mbxStatus
,
8269 post_sgl
->params
.request
.xri_count
,
8270 post_sgl
->params
.request
.xri_start
);
8271 emlxs_mem_buf_free(hba
, mp
);
8272 mbq
->nonembed
= NULL
;
8278 emlxs_mem_buf_free(hba
, mp
);
8279 mbq
->nonembed
= NULL
;
8282 } /* emlxs_sli4_post_sgl_pages() */
8286 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
)
8288 MAILBOX4
*mb
= (MAILBOX4
*)mbq
;
8289 emlxs_port_t
*port
= &PPORT
;
8293 IOCTL_FCOE_POST_HDR_TEMPLATES
*post_hdr
;
8296 if (!(hba
->sli
.sli4
.param
.HDRR
)) {
8300 /* Bytes per extent */
8301 j
= hba
->sli
.sli4
.RPIExtSize
* sizeof (RPIHdrTmplate_t
);
8303 /* Pages required per extent (page == 4096 bytes) */
8304 num_pages
= (j
/4096) + ((j
%4096)? 1:0);
8306 addr
= hba
->sli
.sli4
.HeaderTmplate
.phys
;
8308 /* For each extent */
8309 for (j
= 0; j
< hba
->sli
.sli4
.RPIExtCount
; j
++) {
8310 bzero((void *) mb
, MAILBOX_CMD_SLI4_BSIZE
);
8312 mbq
->mbox_cmpl
= NULL
;
8315 * Signifies an embedded command
8317 mb
->un
.varSLIConfig
.be
.embedded
= 1;
8319 mb
->mbxCommand
= MBX_SLI_CONFIG
;
8320 mb
->mbxOwner
= OWN_HOST
;
8321 mb
->un
.varSLIConfig
.be
.payload_length
=
8322 sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES
) + IOCTL_HEADER_SZ
;
8323 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.subsystem
=
8324 IOCTL_SUBSYSTEM_FCOE
;
8325 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.opcode
=
8326 FCOE_OPCODE_POST_HDR_TEMPLATES
;
8327 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.timeout
= 0;
8328 mb
->un
.varSLIConfig
.be
.un_hdr
.hdr_req
.req_length
=
8329 sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES
);
8332 (IOCTL_FCOE_POST_HDR_TEMPLATES
*)
8333 &mb
->un
.varSLIConfig
.payload
;
8334 post_hdr
->params
.request
.num_pages
= num_pages
;
8335 post_hdr
->params
.request
.rpi_offset
= hba
->sli
.sli4
.RPIBase
[j
];
8337 for (k
= 0; k
< num_pages
; k
++) {
8338 post_hdr
->params
.request
.pages
[k
].addrLow
=
8340 post_hdr
->params
.request
.pages
[k
].addrHigh
=
8345 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8347 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8348 "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
8350 mb
->mbxCommand
, mb
->mbxStatus
);
8353 emlxs_data_dump(port
, "POST_HDR", (uint32_t *)mb
, 18, 0);
8358 } /* emlxs_sli4_post_hdr_tmplates() */
8362 emlxs_sli4_create_queues(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
)
8364 MAILBOX4
*mb
= (MAILBOX4
*)mbq
;
8365 emlxs_port_t
*port
= &PPORT
;
8366 emlxs_config_t
*cfg
= &CFG
;
8367 IOCTL_COMMON_EQ_CREATE
*eq
;
8368 IOCTL_COMMON_CQ_CREATE
*cq
;
8369 IOCTL_FCOE_WQ_CREATE
*wq
;
8370 IOCTL_FCOE_RQ_CREATE
*rq
;
8371 IOCTL_COMMON_MQ_CREATE
*mq
;
8372 IOCTL_COMMON_MQ_CREATE_EXT
*mq_ext
;
8375 uint16_t num_cq
, total_cq
;
8376 uint16_t num_wq
, total_wq
;
8379 * The first CQ is reserved for ASYNC events,
8380 * the second is reserved for unsol rcv, the rest
8381 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
8388 for (i
= 0; i
< hba
->intr_count
; i
++) {
8389 emlxs_mb_eq_create(hba
, mbq
, i
);
8390 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8392 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
8393 "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
8394 i
, mb
->mbxCommand
, mb
->mbxStatus
);
8397 eq
= (IOCTL_COMMON_EQ_CREATE
*)&mb
->un
.varSLIConfig
.payload
;
8398 hba
->sli
.sli4
.eq
[i
].qid
= eq
->params
.response
.EQId
;
8399 hba
->sli
.sli4
.eq
[i
].lastwq
= total_wq
;
8400 hba
->sli
.sli4
.eq
[i
].msix_vector
= i
;
8402 emlxs_data_dump(port
, "EQ0_CREATE", (uint32_t *)mb
, 18, 0);
8403 num_wq
= cfg
[CFG_NUM_WQ
].current
;
8406 /* One for RQ handling, one for mbox/event handling */
8407 num_cq
+= EMLXS_CQ_OFFSET_WQ
;
8411 for (j
= 0; j
< num_cq
; j
++) {
8412 /* Reuse mbq from previous mbox */
8413 bzero(mbq
, sizeof (MAILBOXQ
));
8415 hba
->sli
.sli4
.cq
[total_cq
].eqid
=
8416 hba
->sli
.sli4
.eq
[i
].qid
;
8418 emlxs_mb_cq_create(hba
, mbq
, total_cq
);
8419 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8421 EMLXS_MSGF(EMLXS_CONTEXT
,
8422 &emlxs_init_failed_msg
, "Unable to Create "
8423 "CQ %d: Mailbox cmd=%x status=%x ",
8424 total_cq
, mb
->mbxCommand
, mb
->mbxStatus
);
8427 cq
= (IOCTL_COMMON_CQ_CREATE
*)
8428 &mb
->un
.varSLIConfig
.payload
;
8429 hba
->sli
.sli4
.cq
[total_cq
].qid
=
8430 cq
->params
.response
.CQId
;
8434 /* First CQ is for async event handling */
8435 hba
->sli
.sli4
.cq
[total_cq
].type
=
8436 EMLXS_CQ_TYPE_GROUP1
;
8440 /* Second CQ is for unsol receive handling */
8441 hba
->sli
.sli4
.cq
[total_cq
].type
=
8442 EMLXS_CQ_TYPE_GROUP2
;
8446 /* Setup CQ to channel mapping */
8447 hba
->sli
.sli4
.cq
[total_cq
].type
=
8448 EMLXS_CQ_TYPE_GROUP2
;
8449 hba
->sli
.sli4
.cq
[total_cq
].channelp
=
8450 &hba
->chan
[total_cq
- EMLXS_CQ_OFFSET_WQ
];
8453 emlxs_data_dump(port
, "CQX_CREATE", (uint32_t *)mb
,
8459 for (j
= 0; j
< num_wq
; j
++) {
8460 /* Reuse mbq from previous mbox */
8461 bzero(mbq
, sizeof (MAILBOXQ
));
8463 hba
->sli
.sli4
.wq
[total_wq
].cqid
=
8464 hba
->sli
.sli4
.cq
[total_wq
+ EMLXS_CQ_OFFSET_WQ
].qid
;
8466 emlxs_mb_wq_create(hba
, mbq
, total_wq
);
8467 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8469 EMLXS_MSGF(EMLXS_CONTEXT
,
8470 &emlxs_init_failed_msg
, "Unable to Create "
8471 "WQ %d: Mailbox cmd=%x status=%x ",
8472 total_wq
, mb
->mbxCommand
, mb
->mbxStatus
);
8475 wq
= (IOCTL_FCOE_WQ_CREATE
*)
8476 &mb
->un
.varSLIConfig
.payload
;
8477 hba
->sli
.sli4
.wq
[total_wq
].qid
=
8478 wq
->params
.response
.WQId
;
8480 hba
->sli
.sli4
.wq
[total_wq
].cqid
=
8481 hba
->sli
.sli4
.cq
[total_wq
+EMLXS_CQ_OFFSET_WQ
].qid
;
8482 emlxs_data_dump(port
, "WQ_CREATE", (uint32_t *)mb
,
8486 hba
->last_msiid
= i
;
8489 /* We assume 1 RQ pair will handle ALL incoming data */
8491 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
8492 /* Personalize the RQ */
8495 hba
->sli
.sli4
.rq
[i
].cqid
=
8496 hba
->sli
.sli4
.cq
[EMLXS_CQ_RCV
].qid
;
8499 hba
->sli
.sli4
.rq
[i
].cqid
=
8500 hba
->sli
.sli4
.cq
[EMLXS_CQ_RCV
].qid
;
8503 hba
->sli
.sli4
.rq
[i
].cqid
= 0xffff;
8506 /* Reuse mbq from previous mbox */
8507 bzero(mbq
, sizeof (MAILBOXQ
));
8509 emlxs_mb_rq_create(hba
, mbq
, i
);
8510 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8512 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
8513 "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
8514 i
, mb
->mbxCommand
, mb
->mbxStatus
);
8518 rq
= (IOCTL_FCOE_RQ_CREATE
*)&mb
->un
.varSLIConfig
.payload
;
8519 hba
->sli
.sli4
.rq
[i
].qid
= rq
->params
.response
.RQId
;
8520 emlxs_data_dump(port
, "RQ CREATE", (uint32_t *)mb
, 18, 0);
8522 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8523 "RQ CREATE: rq[%d].qid=%d cqid=%d",
8524 i
, hba
->sli
.sli4
.rq
[i
].qid
, hba
->sli
.sli4
.rq
[i
].cqid
);
8526 /* Initialize the host_index */
8527 hba
->sli
.sli4
.rq
[i
].host_index
= 0;
8529 /* If Data queue was just created, */
8530 /* then post buffers using the header qid */
8532 /* Ring the RQ doorbell to post buffers */
8534 rqdb
.db
.Qid
= hba
->sli
.sli4
.rq
[i
-1].qid
;
8535 rqdb
.db
.NumPosted
= RQB_COUNT
;
8537 emlxs_sli4_write_rqdb(hba
, rqdb
.word
);
8539 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8540 "RQ CREATE: Doorbell rang: qid=%d count=%d",
8541 hba
->sli
.sli4
.rq
[i
-1].qid
, RQB_COUNT
);
8547 /* Personalize the MQ */
8548 hba
->sli
.sli4
.mq
.cqid
= hba
->sli
.sli4
.cq
[EMLXS_CQ_MBOX
].qid
;
8550 /* Reuse mbq from previous mbox */
8551 bzero(mbq
, sizeof (MAILBOXQ
));
8553 emlxs_mb_mq_create_ext(hba
, mbq
);
8554 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8556 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
8557 "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
8558 i
, mb
->mbxCommand
, mb
->mbxStatus
);
8560 /* Reuse mbq from previous mbox */
8561 bzero(mbq
, sizeof (MAILBOXQ
));
8563 emlxs_mb_mq_create(hba
, mbq
);
8564 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) !=
8566 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
8567 "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
8568 i
, mb
->mbxCommand
, mb
->mbxStatus
);
8572 mq
= (IOCTL_COMMON_MQ_CREATE
*)&mb
->un
.varSLIConfig
.payload
;
8573 hba
->sli
.sli4
.mq
.qid
= mq
->params
.response
.MQId
;
8577 mq_ext
= (IOCTL_COMMON_MQ_CREATE_EXT
*)&mb
->un
.varSLIConfig
.payload
;
8578 hba
->sli
.sli4
.mq
.qid
= mq_ext
->params
.response
.MQId
;
8581 } /* emlxs_sli4_create_queues() */
8585 emlxs_sli4_timer(emlxs_hba_t
*hba
)
8587 /* Perform SLI4 level timer checks */
8589 emlxs_fcf_timer_notify(hba
);
8591 emlxs_sli4_timer_check_mbox(hba
);
8595 } /* emlxs_sli4_timer() */
8599 emlxs_sli4_timer_check_mbox(emlxs_hba_t
*hba
)
8601 emlxs_port_t
*port
= &PPORT
;
8602 emlxs_config_t
*cfg
= &CFG
;
8605 if (!cfg
[CFG_TIMEOUT_ENABLE
].current
) {
8609 mutex_enter(&EMLXS_PORT_LOCK
);
8611 /* Return if timer hasn't expired */
8612 if (!hba
->mbox_timer
|| (hba
->timer_tics
< hba
->mbox_timer
)) {
8613 mutex_exit(&EMLXS_PORT_LOCK
);
8617 /* The first to service the mbox queue will clear the timer */
8618 hba
->mbox_timer
= 0;
8620 if (hba
->mbox_queue_flag
) {
8621 if (hba
->mbox_mbq
) {
8622 mb
= (MAILBOX
*)hba
->mbox_mbq
;
8627 switch (hba
->mbox_queue_flag
) {
8629 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_timeout_msg
,
8631 emlxs_mb_cmd_xlate(mb
->mbxCommand
));
8635 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_timeout_msg
,
8637 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
8642 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_timeout_msg
,
8643 "%s: mb=%p Polled.",
8644 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
8649 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_timeout_msg
,
8651 emlxs_mb_cmd_xlate(mb
->mbxCommand
),
8652 mb
, hba
->mbox_queue_flag
);
8656 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_mbox_timeout_msg
, NULL
);
8659 hba
->flag
|= FC_MBOX_TIMEOUT
;
8660 EMLXS_STATE_CHANGE_LOCKED(hba
, FC_ERROR
);
8662 mutex_exit(&EMLXS_PORT_LOCK
);
8664 /* Perform mailbox cleanup */
8665 /* This will wake any sleeping or polling threads */
8666 emlxs_mb_fini(hba
, NULL
, MBX_TIMEOUT
);
8668 /* Trigger adapter shutdown */
8669 emlxs_thread_spawn(hba
, emlxs_shutdown_thread
, 0, 0);
8673 } /* emlxs_sli4_timer_check_mbox() */
8677 emlxs_data_dump(emlxs_port_t
*port
, char *str
, uint32_t *iptr
, int cnt
, int err
)
8681 if (!port
|| !str
|| !iptr
|| !cnt
) {
8686 msg
= &emlxs_sli_err_msg
;
8688 msg
= &emlxs_sli_detail_msg
;
8692 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8693 "%s00: %08x %08x %08x %08x %08x %08x", str
, *iptr
,
8694 *(iptr
+1), *(iptr
+2), *(iptr
+3), *(iptr
+4), *(iptr
+5));
8697 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8698 "%s06: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+6),
8699 *(iptr
+7), *(iptr
+8), *(iptr
+9), *(iptr
+10), *(iptr
+11));
8702 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8703 "%s12: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+12),
8704 *(iptr
+13), *(iptr
+14), *(iptr
+15), *(iptr
+16), *(iptr
+17));
8707 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8708 "%s18: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+18),
8709 *(iptr
+19), *(iptr
+20), *(iptr
+21), *(iptr
+22), *(iptr
+23));
8712 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8713 "%s24: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+24),
8714 *(iptr
+25), *(iptr
+26), *(iptr
+27), *(iptr
+28), *(iptr
+29));
8717 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8718 "%s30: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+30),
8719 *(iptr
+31), *(iptr
+32), *(iptr
+33), *(iptr
+34), *(iptr
+35));
8722 EMLXS_MSGF(EMLXS_CONTEXT
, msg
,
8723 "%s36: %08x %08x %08x %08x %08x %08x", str
, *(iptr
+36),
8724 *(iptr
+37), *(iptr
+38), *(iptr
+39), *(iptr
+40), *(iptr
+41));
8727 } /* emlxs_data_dump() */
8731 emlxs_ue_dump(emlxs_hba_t
*hba
, char *str
)
8733 emlxs_port_t
*port
= &PPORT
;
8740 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
8741 case SLI_INTF_IF_TYPE_0
:
8742 ue_l
= ddi_get32(hba
->pci_acc_handle
,
8743 hba
->sli
.sli4
.ERR1_reg_addr
);
8744 ue_h
= ddi_get32(hba
->pci_acc_handle
,
8745 hba
->sli
.sli4
.ERR2_reg_addr
);
8747 on1
= ddi_get32(hba
->pci_acc_handle
,
8748 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_STATUS_ONLINE1
));
8749 on2
= ddi_get32(hba
->pci_acc_handle
,
8750 (uint32_t *)(hba
->pci_addr
+ PCICFG_UE_STATUS_ONLINE2
));
8752 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8753 "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str
,
8754 ue_l
, ue_h
, on1
, on2
);
8757 case SLI_INTF_IF_TYPE_2
:
8758 status
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8759 hba
->sli
.sli4
.STATUS_reg_addr
);
8761 ue_l
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8762 hba
->sli
.sli4
.ERR1_reg_addr
);
8763 ue_h
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8764 hba
->sli
.sli4
.ERR2_reg_addr
);
8766 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8767 "%s: status:%08x err1:%08x err2:%08x", str
,
8768 status
, ue_l
, ue_h
);
8774 /* Access handle validation */
8775 EMLXS_CHK_ACC_HANDLE(hba
, hba
->pci_acc_handle
);
8776 #endif /* FMA_SUPPORT */
8778 } /* emlxs_ue_dump() */
8782 emlxs_sli4_poll_erratt(emlxs_hba_t
*hba
)
8784 emlxs_port_t
*port
= &PPORT
;
8790 if (hba
->flag
& FC_HARDWARE_ERROR
) {
8794 switch (hba
->sli_intf
& SLI_INTF_IF_TYPE_MASK
) {
8795 case SLI_INTF_IF_TYPE_0
:
8796 ue_l
= ddi_get32(hba
->pci_acc_handle
,
8797 hba
->sli
.sli4
.ERR1_reg_addr
);
8798 ue_h
= ddi_get32(hba
->pci_acc_handle
,
8799 hba
->sli
.sli4
.ERR2_reg_addr
);
8801 if ((~hba
->sli
.sli4
.ue_mask_lo
& ue_l
) ||
8802 (~hba
->sli
.sli4
.ue_mask_hi
& ue_h
) ||
8803 (hba
->sli
.sli4
.flag
& EMLXS_SLI4_HW_ERROR
)) {
8804 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_hardware_error_msg
,
8805 "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
8806 "maskHigh:%08x flag:%08x",
8807 ue_l
, ue_h
, hba
->sli
.sli4
.ue_mask_lo
,
8808 hba
->sli
.sli4
.ue_mask_hi
, hba
->sli
.sli4
.flag
);
8814 case SLI_INTF_IF_TYPE_2
:
8815 status
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8816 hba
->sli
.sli4
.STATUS_reg_addr
);
8818 if ((status
& SLI_STATUS_ERROR
) ||
8819 (hba
->sli
.sli4
.flag
& EMLXS_SLI4_HW_ERROR
)) {
8820 ue_l
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8821 hba
->sli
.sli4
.ERR1_reg_addr
);
8822 ue_h
= ddi_get32(hba
->sli
.sli4
.bar0_acc_handle
,
8823 hba
->sli
.sli4
.ERR2_reg_addr
);
8825 error
= (status
& SLI_STATUS_RESET_NEEDED
)? 1:2;
8828 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_debug_msg
,
8829 "Host Error: status:%08x err1:%08x "
8830 "err2:%08x flag:%08x",
8831 status
, ue_l
, ue_h
, hba
->sli
.sli4
.flag
);
8833 EMLXS_MSGF(EMLXS_CONTEXT
,
8834 &emlxs_hardware_error_msg
,
8835 "Host Error: status:%08x err1:%08x "
8836 "err2:%08x flag:%08x",
8837 status
, ue_l
, ue_h
, hba
->sli
.sli4
.flag
);
8844 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
8846 emlxs_sli4_hba_flush_chipq(hba
);
8848 emlxs_thread_spawn(hba
, emlxs_shutdown_thread
, 0, 0);
8850 } else if (error
== 1) {
8851 EMLXS_STATE_CHANGE(hba
, FC_ERROR
);
8853 emlxs_sli4_hba_flush_chipq(hba
);
8855 emlxs_thread_spawn(hba
, emlxs_restart_thread
, 0, 0);
8859 /* Access handle validation */
8860 EMLXS_CHK_ACC_HANDLE(hba
, hba
->pci_acc_handle
);
8861 #endif /* FMA_SUPPORT */
8863 } /* emlxs_sli4_poll_erratt() */
8867 emlxs_sli4_reg_did(emlxs_port_t
*port
, uint32_t did
, SERV_PARM
*param
,
8868 emlxs_buf_t
*sbp
, fc_unsol_buf_t
*ubp
, IOCBQ
*iocbq
)
8870 emlxs_hba_t
*hba
= HBA
;
8875 /* Check for invalid node ids to register */
8876 if ((did
== 0) && (!(hba
->flag
& FC_LOOPBACK_MODE
))) {
8880 if (did
& 0xff000000) {
8884 /* We don't register our own did */
8885 if ((did
== port
->did
) && (!(hba
->flag
& FC_LOOPBACK_MODE
))) {
8889 if (did
!= FABRIC_DID
) {
8890 if ((rval
= emlxs_mb_check_sparm(hba
, param
))) {
8891 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_node_create_failed_msg
,
8892 "Invalid service parameters. did=%06x rval=%d", did
,
8899 /* Check if the node limit has been reached */
8900 if (port
->node_count
>= hba
->max_nodes
) {
8901 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_node_create_failed_msg
,
8902 "Limit reached. did=%06x count=%d", did
,
8908 node
= emlxs_node_find_did(port
, did
, 1);
8909 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
8911 rval
= emlxs_rpi_online_notify(port
, rpip
, did
, param
, (void *)sbp
,
8912 (void *)ubp
, (void *)iocbq
);
8916 } /* emlxs_sli4_reg_did() */
8920 emlxs_sli4_unreg_node(emlxs_port_t
*port
, emlxs_node_t
*node
,
8921 emlxs_buf_t
*sbp
, fc_unsol_buf_t
*ubp
, IOCBQ
*iocbq
)
8927 /* Unreg all nodes */
8928 (void) emlxs_sli4_unreg_all_nodes(port
);
8932 /* Check for base node */
8933 if (node
== &port
->node_base
) {
8934 /* Just flush base node */
8935 (void) emlxs_tx_node_flush(port
, &port
->node_base
,
8938 (void) emlxs_chipq_node_flush(port
, 0,
8939 &port
->node_base
, 0);
8947 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8948 "unreg_node:%p did=%x rpi=%d",
8949 node
, node
->nlp_DID
, node
->nlp_Rpi
);
8951 rpip
= EMLXS_NODE_TO_RPI(port
, node
);
8954 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
8955 "unreg_node:%p did=%x rpi=%d. RPI not found.",
8956 node
, node
->nlp_DID
, node
->nlp_Rpi
);
8958 emlxs_node_rm(port
, node
);
8962 rval
= emlxs_rpi_offline_notify(port
, rpip
, (void *)sbp
, (void *)ubp
,
8967 } /* emlxs_sli4_unreg_node() */
8971 emlxs_sli4_unreg_all_nodes(emlxs_port_t
*port
)
8977 /* Set the node tags */
8978 /* We will process all nodes with this tag */
8979 rw_enter(&port
->node_rwlock
, RW_READER
);
8981 for (i
= 0; i
< EMLXS_NUM_HASH_QUES
; i
++) {
8982 nlp
= port
->node_table
[i
];
8983 while (nlp
!= NULL
) {
8986 nlp
= nlp
->nlp_list_next
;
8989 rw_exit(&port
->node_rwlock
);
8996 rw_enter(&port
->node_rwlock
, RW_READER
);
8998 for (i
= 0; i
< EMLXS_NUM_HASH_QUES
; i
++) {
8999 nlp
= port
->node_table
[i
];
9000 while (nlp
!= NULL
) {
9001 if (!nlp
->nlp_tag
) {
9002 nlp
= nlp
->nlp_list_next
;
9014 rw_exit(&port
->node_rwlock
);
9020 (void) emlxs_sli4_unreg_node(port
, nlp
, 0, 0, 0);
9025 } /* emlxs_sli4_unreg_all_nodes() */
9029 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t
*hba
, CQE_ASYNC_t
*cqe
)
9031 emlxs_port_t
*port
= &PPORT
;
9033 /* Handle link down */
9034 if ((cqe
->un
.link
.link_status
== ASYNC_EVENT_LOGICAL_LINK_DOWN
) ||
9035 (cqe
->un
.link
.link_status
== ASYNC_EVENT_PHYS_LINK_DOWN
)) {
9036 (void) emlxs_fcf_linkdown_notify(port
);
9038 mutex_enter(&EMLXS_PORT_LOCK
);
9039 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_DOWN_LINK
;
9040 mutex_exit(&EMLXS_PORT_LOCK
);
9047 switch (cqe
->un
.link
.port_speed
) {
9049 hba
->linkspeed
= LA_1GHZ_LINK
;
9051 case PHY_10GHZ_LINK
:
9052 hba
->linkspeed
= LA_10GHZ_LINK
;
9055 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
9056 "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9057 cqe
->un
.link
.port_speed
);
9062 /* Set qos_linkspeed */
9063 hba
->qos_linkspeed
= cqe
->un
.link
.qos_link_speed
;
9066 hba
->topology
= TOPOLOGY_PT_PT
;
9068 mutex_enter(&EMLXS_PORT_LOCK
);
9069 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_DOWN_LINK
;
9070 mutex_exit(&EMLXS_PORT_LOCK
);
9072 (void) emlxs_fcf_linkup_notify(port
);
9076 } /* emlxs_sli4_handle_fcoe_link_event() */
9080 emlxs_sli4_handle_fc_link_att(emlxs_hba_t
*hba
, CQE_ASYNC_t
*cqe
)
9082 emlxs_port_t
*port
= &PPORT
;
9084 /* Handle link down */
9085 if (cqe
->un
.fc
.att_type
== ATT_TYPE_LINK_DOWN
) {
9086 (void) emlxs_fcf_linkdown_notify(port
);
9088 mutex_enter(&EMLXS_PORT_LOCK
);
9089 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_DOWN_LINK
;
9090 mutex_exit(&EMLXS_PORT_LOCK
);
9097 switch (cqe
->un
.fc
.port_speed
) {
9099 hba
->linkspeed
= LA_1GHZ_LINK
;
9102 hba
->linkspeed
= LA_2GHZ_LINK
;
9105 hba
->linkspeed
= LA_4GHZ_LINK
;
9108 hba
->linkspeed
= LA_8GHZ_LINK
;
9111 hba
->linkspeed
= LA_10GHZ_LINK
;
9114 hba
->linkspeed
= LA_16GHZ_LINK
;
9117 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_sli_detail_msg
,
9118 "sli4_handle_fc_link_att: Unknown link speed=%x.",
9119 cqe
->un
.fc
.port_speed
);
9124 /* Set qos_linkspeed */
9125 hba
->qos_linkspeed
= cqe
->un
.fc
.link_speed
;
9128 hba
->topology
= cqe
->un
.fc
.topology
;
9130 mutex_enter(&EMLXS_PORT_LOCK
);
9131 hba
->sli
.sli4
.flag
&= ~EMLXS_SLI4_DOWN_LINK
;
9132 mutex_exit(&EMLXS_PORT_LOCK
);
9134 (void) emlxs_fcf_linkup_notify(port
);
9138 } /* emlxs_sli4_handle_fc_link_att() */
9142 emlxs_sli4_init_extents(emlxs_hba_t
*hba
, MAILBOXQ
*mbq
)
9144 emlxs_port_t
*port
= &PPORT
;
9146 IOCTL_COMMON_EXTENTS
*ep
;
9150 if (!(hba
->sli
.sli4
.param
.EXT
)) {
9154 mb4
= (MAILBOX4
*) mbq
;
9156 /* Discover XRI Extents */
9157 bzero(mbq
, sizeof (MAILBOXQ
));
9158 emlxs_mb_get_extents_info(hba
, mbq
, RSC_TYPE_FCOE_XRI
);
9160 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9161 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9162 "Unable to discover XRI extents. Mailbox cmd=%x status=%x",
9163 mb4
->mbxCommand
, mb4
->mbxStatus
);
9168 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9169 hba
->sli
.sli4
.XRIExtSize
= ep
->params
.response
.ExtentSize
;
9170 ExtentCnt
= ep
->params
.response
.ExtentCnt
;
9172 /* Allocate XRI Extents */
9173 bzero(mbq
, sizeof (MAILBOXQ
));
9174 emlxs_mb_alloc_extents(hba
, mbq
, RSC_TYPE_FCOE_XRI
, ExtentCnt
);
9176 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9177 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9178 "Unable to allocate XRI extents. Mailbox cmd=%x status=%x",
9179 mb4
->mbxCommand
, mb4
->mbxStatus
);
9183 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9185 bcopy((uint8_t *)ep
->params
.response
.RscId
,
9186 (uint8_t *)hba
->sli
.sli4
.XRIBase
,
9187 (ep
->params
.response
.ExtentCnt
* sizeof (uint16_t)));
9189 hba
->sli
.sli4
.XRIExtCount
= ep
->params
.response
.ExtentCnt
;
9190 hba
->sli
.sli4
.XRICount
= hba
->sli
.sli4
.XRIExtCount
*
9191 hba
->sli
.sli4
.XRIExtSize
;
9193 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9194 "XRI Ext: size=%d cnt=%d/%d",
9195 hba
->sli
.sli4
.XRIExtSize
,
9196 hba
->sli
.sli4
.XRIExtCount
, ExtentCnt
);
9198 for (i
= 0; i
< ep
->params
.response
.ExtentCnt
; i
+= 4) {
9199 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9200 "XRI Ext%d: %d, %d, %d, %d", i
,
9201 hba
->sli
.sli4
.XRIBase
[i
],
9202 hba
->sli
.sli4
.XRIBase
[i
+1],
9203 hba
->sli
.sli4
.XRIBase
[i
+2],
9204 hba
->sli
.sli4
.XRIBase
[i
+3]);
9208 /* Discover RPI Extents */
9209 bzero(mbq
, sizeof (MAILBOXQ
));
9210 emlxs_mb_get_extents_info(hba
, mbq
, RSC_TYPE_FCOE_RPI
);
9212 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9213 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9214 "Unable to discover RPI extents. Mailbox cmd=%x status=%x",
9215 mb4
->mbxCommand
, mb4
->mbxStatus
);
9220 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9221 hba
->sli
.sli4
.RPIExtSize
= ep
->params
.response
.ExtentSize
;
9222 ExtentCnt
= ep
->params
.response
.ExtentCnt
;
9224 /* Allocate RPI Extents */
9225 bzero(mbq
, sizeof (MAILBOXQ
));
9226 emlxs_mb_alloc_extents(hba
, mbq
, RSC_TYPE_FCOE_RPI
, ExtentCnt
);
9228 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9229 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9230 "Unable to allocate RPI extents. Mailbox cmd=%x status=%x",
9231 mb4
->mbxCommand
, mb4
->mbxStatus
);
9235 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9237 bcopy((uint8_t *)ep
->params
.response
.RscId
,
9238 (uint8_t *)hba
->sli
.sli4
.RPIBase
,
9239 (ep
->params
.response
.ExtentCnt
* sizeof (uint16_t)));
9241 hba
->sli
.sli4
.RPIExtCount
= ep
->params
.response
.ExtentCnt
;
9242 hba
->sli
.sli4
.RPICount
= hba
->sli
.sli4
.RPIExtCount
*
9243 hba
->sli
.sli4
.RPIExtSize
;
9245 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9246 "RPI Ext: size=%d cnt=%d/%d",
9247 hba
->sli
.sli4
.RPIExtSize
,
9248 hba
->sli
.sli4
.RPIExtCount
, ExtentCnt
);
9250 for (i
= 0; i
< ep
->params
.response
.ExtentCnt
; i
+= 4) {
9251 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9252 "RPI Ext%d: %d, %d, %d, %d", i
,
9253 hba
->sli
.sli4
.RPIBase
[i
],
9254 hba
->sli
.sli4
.RPIBase
[i
+1],
9255 hba
->sli
.sli4
.RPIBase
[i
+2],
9256 hba
->sli
.sli4
.RPIBase
[i
+3]);
9260 /* Discover VPI Extents */
9261 bzero(mbq
, sizeof (MAILBOXQ
));
9262 emlxs_mb_get_extents_info(hba
, mbq
, RSC_TYPE_FCOE_VPI
);
9264 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9265 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9266 "Unable to discover VPI extents. Mailbox cmd=%x status=%x",
9267 mb4
->mbxCommand
, mb4
->mbxStatus
);
9272 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9273 hba
->sli
.sli4
.VPIExtSize
= ep
->params
.response
.ExtentSize
;
9274 ExtentCnt
= ep
->params
.response
.ExtentCnt
;
9276 /* Allocate VPI Extents */
9277 bzero(mbq
, sizeof (MAILBOXQ
));
9278 emlxs_mb_alloc_extents(hba
, mbq
, RSC_TYPE_FCOE_VPI
, ExtentCnt
);
9280 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9281 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9282 "Unable to allocate VPI extents. Mailbox cmd=%x status=%x",
9283 mb4
->mbxCommand
, mb4
->mbxStatus
);
9287 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9289 bcopy((uint8_t *)ep
->params
.response
.RscId
,
9290 (uint8_t *)hba
->sli
.sli4
.VPIBase
,
9291 (ep
->params
.response
.ExtentCnt
* sizeof (uint16_t)));
9293 hba
->sli
.sli4
.VPIExtCount
= ep
->params
.response
.ExtentCnt
;
9294 hba
->sli
.sli4
.VPICount
= hba
->sli
.sli4
.VPIExtCount
*
9295 hba
->sli
.sli4
.VPIExtSize
;
9297 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9298 "VPI Ext: size=%d cnt=%d/%d",
9299 hba
->sli
.sli4
.VPIExtSize
,
9300 hba
->sli
.sli4
.VPIExtCount
, ExtentCnt
);
9302 for (i
= 0; i
< ep
->params
.response
.ExtentCnt
; i
+= 4) {
9303 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9304 "VPI Ext%d: %d, %d, %d, %d", i
,
9305 hba
->sli
.sli4
.VPIBase
[i
],
9306 hba
->sli
.sli4
.VPIBase
[i
+1],
9307 hba
->sli
.sli4
.VPIBase
[i
+2],
9308 hba
->sli
.sli4
.VPIBase
[i
+3]);
9311 /* Discover VFI Extents */
9312 bzero(mbq
, sizeof (MAILBOXQ
));
9313 emlxs_mb_get_extents_info(hba
, mbq
, RSC_TYPE_FCOE_VFI
);
9315 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9316 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9317 "Unable to discover VFI extents. Mailbox cmd=%x status=%x",
9318 mb4
->mbxCommand
, mb4
->mbxStatus
);
9323 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9324 hba
->sli
.sli4
.VFIExtSize
= ep
->params
.response
.ExtentSize
;
9325 ExtentCnt
= ep
->params
.response
.ExtentCnt
;
9327 /* Allocate VFI Extents */
9328 bzero(mbq
, sizeof (MAILBOXQ
));
9329 emlxs_mb_alloc_extents(hba
, mbq
, RSC_TYPE_FCOE_VFI
, ExtentCnt
);
9331 if (emlxs_sli4_issue_mbox_cmd(hba
, mbq
, MBX_WAIT
, 0) != MBX_SUCCESS
) {
9332 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_failed_msg
,
9333 "Unable to allocate VFI extents. Mailbox cmd=%x status=%x",
9334 mb4
->mbxCommand
, mb4
->mbxStatus
);
9338 ep
= (IOCTL_COMMON_EXTENTS
*)&mb4
->un
.varSLIConfig
.payload
;
9340 bcopy((uint8_t *)ep
->params
.response
.RscId
,
9341 (uint8_t *)hba
->sli
.sli4
.VFIBase
,
9342 (ep
->params
.response
.ExtentCnt
* sizeof (uint16_t)));
9344 hba
->sli
.sli4
.VFIExtCount
= ep
->params
.response
.ExtentCnt
;
9345 hba
->sli
.sli4
.VFICount
= hba
->sli
.sli4
.VFIExtCount
*
9346 hba
->sli
.sli4
.VFIExtSize
;
9348 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9349 "VFI Ext: size=%d cnt=%d/%d",
9350 hba
->sli
.sli4
.VFIExtSize
,
9351 hba
->sli
.sli4
.VFIExtCount
, ExtentCnt
);
9353 for (i
= 0; i
< ep
->params
.response
.ExtentCnt
; i
+= 4) {
9354 EMLXS_MSGF(EMLXS_CONTEXT
, &emlxs_init_debug_msg
,
9355 "VFI Ext%d: %d, %d, %d, %d", i
,
9356 hba
->sli
.sli4
.VFIBase
[i
],
9357 hba
->sli
.sli4
.VFIBase
[i
+1],
9358 hba
->sli
.sli4
.VFIBase
[i
+2],
9359 hba
->sli
.sli4
.VFIBase
[i
+3]);
9364 } /* emlxs_sli4_init_extents() */
9368 emlxs_sli4_index_to_rpi(emlxs_hba_t
*hba
, uint32_t index
)
9374 i
= index
/ hba
->sli
.sli4
.RPIExtSize
;
9375 j
= index
% hba
->sli
.sli4
.RPIExtSize
;
9376 rpi
= hba
->sli
.sli4
.RPIBase
[i
] + j
;
9380 } /* emlxs_sli4_index_to_rpi */
9384 emlxs_sli4_rpi_to_index(emlxs_hba_t
*hba
, uint32_t rpi
)
9389 uint32_t index
= hba
->sli
.sli4
.RPICount
;
9391 for (i
= 0; i
< hba
->sli
.sli4
.RPIExtCount
; i
++) {
9392 lo
= hba
->sli
.sli4
.RPIBase
[i
];
9393 hi
= lo
+ hba
->sli
.sli4
.RPIExtSize
;
9395 if ((rpi
< hi
) && (rpi
>= lo
)) {
9396 index
= (i
* hba
->sli
.sli4
.RPIExtSize
) + (rpi
- lo
);
9403 } /* emlxs_sli4_rpi_to_index */
9407 emlxs_sli4_index_to_xri(emlxs_hba_t
*hba
, uint32_t index
)
9413 i
= index
/ hba
->sli
.sli4
.XRIExtSize
;
9414 j
= index
% hba
->sli
.sli4
.XRIExtSize
;
9415 xri
= hba
->sli
.sli4
.XRIBase
[i
] + j
;
9419 } /* emlxs_sli4_index_to_xri */
9425 emlxs_sli4_index_to_vpi(emlxs_hba_t
*hba
, uint32_t index
)
9431 i
= index
/ hba
->sli
.sli4
.VPIExtSize
;
9432 j
= index
% hba
->sli
.sli4
.VPIExtSize
;
9433 vpi
= hba
->sli
.sli4
.VPIBase
[i
] + j
;
9437 } /* emlxs_sli4_index_to_vpi */
9441 emlxs_sli4_vpi_to_index(emlxs_hba_t
*hba
, uint32_t vpi
)
9446 uint32_t index
= hba
->sli
.sli4
.VPICount
;
9448 for (i
= 0; i
< hba
->sli
.sli4
.VPIExtCount
; i
++) {
9449 lo
= hba
->sli
.sli4
.VPIBase
[i
];
9450 hi
= lo
+ hba
->sli
.sli4
.VPIExtSize
;
9452 if ((vpi
< hi
) && (vpi
>= lo
)) {
9453 index
= (i
* hba
->sli
.sli4
.VPIExtSize
) + (vpi
- lo
);
9460 } /* emlxs_sli4_vpi_to_index */
9466 emlxs_sli4_index_to_vfi(emlxs_hba_t
*hba
, uint32_t index
)
9472 i
= index
/ hba
->sli
.sli4
.VFIExtSize
;
9473 j
= index
% hba
->sli
.sli4
.VFIExtSize
;
9474 vfi
= hba
->sli
.sli4
.VFIBase
[i
] + j
;
9478 } /* emlxs_sli4_index_to_vfi */
9482 emlxs_sli4_rqid_to_index(emlxs_hba_t
*hba
, uint16_t rqid
)
9486 if (rqid
< 0xffff) {
9487 for (i
= 0; i
< EMLXS_MAX_RQS
; i
++) {
9488 if (hba
->sli
.sli4
.rq
[i
].qid
== rqid
) {
9496 } /* emlxs_sli4_rqid_to_index */
9500 emlxs_sli4_wqid_to_index(emlxs_hba_t
*hba
, uint16_t wqid
)
9504 if (wqid
< 0xffff) {
9505 for (i
= 0; i
< EMLXS_MAX_WQS
; i
++) {
9506 if (hba
->sli
.sli4
.wq
[i
].qid
== wqid
) {
9514 } /* emlxs_sli4_wqid_to_index */
9518 emlxs_sli4_cqid_to_index(emlxs_hba_t
*hba
, uint16_t cqid
)
9522 if (cqid
< 0xffff) {
9523 for (i
= 0; i
< EMLXS_MAX_CQS
; i
++) {
9524 if (hba
->sli
.sli4
.cq
[i
].qid
== cqid
) {
9532 } /* emlxs_sli4_cqid_to_index */