/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
18 #include "bfa_os_inc.h"
21 #include "bfa_modules.h"
24 BFA_TRC_FILE(HAL
, FCXP
);
33 * LPS related definitions
35 #define BFA_LPS_MIN_LPORTS (1)
36 #define BFA_LPS_MAX_LPORTS (256)
39 * Maximum Vports supported per physical port or vf.
41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
45 * lps_pvt BFA LPS private functions
49 BFA_LPS_SM_LOGIN
= 1, /* login request from user */
50 BFA_LPS_SM_LOGOUT
= 2, /* logout request from user */
51 BFA_LPS_SM_FWRSP
= 3, /* f/w response to login/logout */
52 BFA_LPS_SM_RESUME
= 4, /* space present in reqq queue */
53 BFA_LPS_SM_DELETE
= 5, /* lps delete from user */
54 BFA_LPS_SM_OFFLINE
= 6, /* Link is offline */
55 BFA_LPS_SM_RX_CVL
= 7, /* Rx clear virtual link */
59 * FC PORT related definitions
62 * The port is considered disabled if corresponding physical port or IOC are
65 #define BFA_PORT_IS_DISABLED(bfa) \
66 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
71 * BFA port state machine events
73 enum bfa_fcport_sm_event
{
74 BFA_FCPORT_SM_START
= 1, /* start port state machine */
75 BFA_FCPORT_SM_STOP
= 2, /* stop port state machine */
76 BFA_FCPORT_SM_ENABLE
= 3, /* enable port */
77 BFA_FCPORT_SM_DISABLE
= 4, /* disable port state machine */
78 BFA_FCPORT_SM_FWRSP
= 5, /* firmware enable/disable rsp */
79 BFA_FCPORT_SM_LINKUP
= 6, /* firmware linkup event */
80 BFA_FCPORT_SM_LINKDOWN
= 7, /* firmware linkup down */
81 BFA_FCPORT_SM_QRESUME
= 8, /* CQ space available */
82 BFA_FCPORT_SM_HWFAIL
= 9, /* IOC h/w failure */
86 * BFA port link notification state machine events
89 enum bfa_fcport_ln_sm_event
{
90 BFA_FCPORT_LN_SM_LINKUP
= 1, /* linkup event */
91 BFA_FCPORT_LN_SM_LINKDOWN
= 2, /* linkdown event */
92 BFA_FCPORT_LN_SM_NOTIFICATION
= 3 /* done notification */
96 * RPORT related definitions
98 #define bfa_rport_offline_cb(__rp) do { \
99 if ((__rp)->bfa->fcs) \
100 bfa_cb_rport_offline((__rp)->rport_drv); \
102 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
103 __bfa_cb_rport_offline, (__rp)); \
107 #define bfa_rport_online_cb(__rp) do { \
108 if ((__rp)->bfa->fcs) \
109 bfa_cb_rport_online((__rp)->rport_drv); \
111 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
112 __bfa_cb_rport_online, (__rp)); \
117 enum bfa_rport_event
{
118 BFA_RPORT_SM_CREATE
= 1, /* rport create event */
119 BFA_RPORT_SM_DELETE
= 2, /* deleting an existing rport */
120 BFA_RPORT_SM_ONLINE
= 3, /* rport is online */
121 BFA_RPORT_SM_OFFLINE
= 4, /* rport is offline */
122 BFA_RPORT_SM_FWRSP
= 5, /* firmware response */
123 BFA_RPORT_SM_HWFAIL
= 6, /* IOC h/w failure */
124 BFA_RPORT_SM_QOS_SCN
= 7, /* QoS SCN from firmware */
125 BFA_RPORT_SM_SET_SPEED
= 8, /* Set Rport Speed */
126 BFA_RPORT_SM_QRESUME
= 9, /* space in requeue queue */
130 * forward declarations FCXP related functions
132 static void __bfa_fcxp_send_cbfn(void *cbarg
, bfa_boolean_t complete
);
133 static void hal_fcxp_rx_plog(struct bfa_s
*bfa
, struct bfa_fcxp_s
*fcxp
,
134 struct bfi_fcxp_send_rsp_s
*fcxp_rsp
);
135 static void hal_fcxp_tx_plog(struct bfa_s
*bfa
, u32 reqlen
,
136 struct bfa_fcxp_s
*fcxp
, struct fchs_s
*fchs
);
137 static void bfa_fcxp_qresume(void *cbarg
);
138 static void bfa_fcxp_queue(struct bfa_fcxp_s
*fcxp
,
139 struct bfi_fcxp_send_req_s
*send_req
);
142 * forward declarations for LPS functions
144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
146 static void bfa_lps_attach(struct bfa_s
*bfa
, void *bfad
,
147 struct bfa_iocfc_cfg_s
*cfg
,
148 struct bfa_meminfo_s
*meminfo
,
149 struct bfa_pcidev_s
*pcidev
);
150 static void bfa_lps_detach(struct bfa_s
*bfa
);
151 static void bfa_lps_start(struct bfa_s
*bfa
);
152 static void bfa_lps_stop(struct bfa_s
*bfa
);
153 static void bfa_lps_iocdisable(struct bfa_s
*bfa
);
154 static void bfa_lps_login_rsp(struct bfa_s
*bfa
,
155 struct bfi_lps_login_rsp_s
*rsp
);
156 static void bfa_lps_logout_rsp(struct bfa_s
*bfa
,
157 struct bfi_lps_logout_rsp_s
*rsp
);
158 static void bfa_lps_reqq_resume(void *lps_arg
);
159 static void bfa_lps_free(struct bfa_lps_s
*lps
);
160 static void bfa_lps_send_login(struct bfa_lps_s
*lps
);
161 static void bfa_lps_send_logout(struct bfa_lps_s
*lps
);
162 static void bfa_lps_login_comp(struct bfa_lps_s
*lps
);
163 static void bfa_lps_logout_comp(struct bfa_lps_s
*lps
);
164 static void bfa_lps_cvl_event(struct bfa_lps_s
*lps
);
167 * forward declaration for LPS state machine
169 static void bfa_lps_sm_init(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
170 static void bfa_lps_sm_login(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
171 static void bfa_lps_sm_loginwait(struct bfa_lps_s
*lps
, enum bfa_lps_event
173 static void bfa_lps_sm_online(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
174 static void bfa_lps_sm_logout(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
175 static void bfa_lps_sm_logowait(struct bfa_lps_s
*lps
, enum bfa_lps_event
179 * forward declaration for FC Port functions
181 static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s
*fcport
);
182 static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s
*fcport
);
183 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s
*fcport
);
184 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s
*fcport
);
185 static void bfa_fcport_set_wwns(struct bfa_fcport_s
*fcport
);
186 static void __bfa_cb_fcport_event(void *cbarg
, bfa_boolean_t complete
);
187 static void bfa_fcport_scn(struct bfa_fcport_s
*fcport
,
188 enum bfa_port_linkstate event
, bfa_boolean_t trunk
);
189 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s
*ln
,
190 enum bfa_port_linkstate event
);
191 static void __bfa_cb_fcport_stats_clr(void *cbarg
, bfa_boolean_t complete
);
192 static void bfa_fcport_stats_get_timeout(void *cbarg
);
193 static void bfa_fcport_stats_clr_timeout(void *cbarg
);
194 static void bfa_trunk_iocdisable(struct bfa_s
*bfa
);
197 * forward declaration for FC PORT state machine
199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s
*fcport
,
200 enum bfa_fcport_sm_event event
);
201 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s
*fcport
,
202 enum bfa_fcport_sm_event event
);
203 static void bfa_fcport_sm_enabling(struct bfa_fcport_s
*fcport
,
204 enum bfa_fcport_sm_event event
);
205 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s
*fcport
,
206 enum bfa_fcport_sm_event event
);
207 static void bfa_fcport_sm_linkup(struct bfa_fcport_s
*fcport
,
208 enum bfa_fcport_sm_event event
);
209 static void bfa_fcport_sm_disabling(struct bfa_fcport_s
*fcport
,
210 enum bfa_fcport_sm_event event
);
211 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s
*fcport
,
212 enum bfa_fcport_sm_event event
);
213 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s
*fcport
,
214 enum bfa_fcport_sm_event event
);
215 static void bfa_fcport_sm_disabled(struct bfa_fcport_s
*fcport
,
216 enum bfa_fcport_sm_event event
);
217 static void bfa_fcport_sm_stopped(struct bfa_fcport_s
*fcport
,
218 enum bfa_fcport_sm_event event
);
219 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s
*fcport
,
220 enum bfa_fcport_sm_event event
);
221 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s
*fcport
,
222 enum bfa_fcport_sm_event event
);
224 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s
*ln
,
225 enum bfa_fcport_ln_sm_event event
);
226 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s
*ln
,
227 enum bfa_fcport_ln_sm_event event
);
228 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
229 enum bfa_fcport_ln_sm_event event
);
230 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s
*ln
,
231 enum bfa_fcport_ln_sm_event event
);
232 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s
*ln
,
233 enum bfa_fcport_ln_sm_event event
);
234 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s
*ln
,
235 enum bfa_fcport_ln_sm_event event
);
236 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
237 enum bfa_fcport_ln_sm_event event
);
239 static struct bfa_sm_table_s hal_port_sm_table
[] = {
240 {BFA_SM(bfa_fcport_sm_uninit
), BFA_PORT_ST_UNINIT
},
241 {BFA_SM(bfa_fcport_sm_enabling_qwait
), BFA_PORT_ST_ENABLING_QWAIT
},
242 {BFA_SM(bfa_fcport_sm_enabling
), BFA_PORT_ST_ENABLING
},
243 {BFA_SM(bfa_fcport_sm_linkdown
), BFA_PORT_ST_LINKDOWN
},
244 {BFA_SM(bfa_fcport_sm_linkup
), BFA_PORT_ST_LINKUP
},
245 {BFA_SM(bfa_fcport_sm_disabling_qwait
), BFA_PORT_ST_DISABLING_QWAIT
},
246 {BFA_SM(bfa_fcport_sm_toggling_qwait
), BFA_PORT_ST_TOGGLING_QWAIT
},
247 {BFA_SM(bfa_fcport_sm_disabling
), BFA_PORT_ST_DISABLING
},
248 {BFA_SM(bfa_fcport_sm_disabled
), BFA_PORT_ST_DISABLED
},
249 {BFA_SM(bfa_fcport_sm_stopped
), BFA_PORT_ST_STOPPED
},
250 {BFA_SM(bfa_fcport_sm_iocdown
), BFA_PORT_ST_IOCDOWN
},
251 {BFA_SM(bfa_fcport_sm_iocfail
), BFA_PORT_ST_IOCDOWN
},
256 * forward declaration for RPORT related functions
258 static struct bfa_rport_s
*bfa_rport_alloc(struct bfa_rport_mod_s
*rp_mod
);
259 static void bfa_rport_free(struct bfa_rport_s
*rport
);
260 static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s
*rp
);
261 static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s
*rp
);
262 static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s
*rp
);
263 static void __bfa_cb_rport_online(void *cbarg
,
264 bfa_boolean_t complete
);
265 static void __bfa_cb_rport_offline(void *cbarg
,
266 bfa_boolean_t complete
);
269 * forward declaration for RPORT state machine
271 static void bfa_rport_sm_uninit(struct bfa_rport_s
*rp
,
272 enum bfa_rport_event event
);
273 static void bfa_rport_sm_created(struct bfa_rport_s
*rp
,
274 enum bfa_rport_event event
);
275 static void bfa_rport_sm_fwcreate(struct bfa_rport_s
*rp
,
276 enum bfa_rport_event event
);
277 static void bfa_rport_sm_online(struct bfa_rport_s
*rp
,
278 enum bfa_rport_event event
);
279 static void bfa_rport_sm_fwdelete(struct bfa_rport_s
*rp
,
280 enum bfa_rport_event event
);
281 static void bfa_rport_sm_offline(struct bfa_rport_s
*rp
,
282 enum bfa_rport_event event
);
283 static void bfa_rport_sm_deleting(struct bfa_rport_s
*rp
,
284 enum bfa_rport_event event
);
285 static void bfa_rport_sm_offline_pending(struct bfa_rport_s
*rp
,
286 enum bfa_rport_event event
);
287 static void bfa_rport_sm_delete_pending(struct bfa_rport_s
*rp
,
288 enum bfa_rport_event event
);
289 static void bfa_rport_sm_iocdisable(struct bfa_rport_s
*rp
,
290 enum bfa_rport_event event
);
291 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s
*rp
,
292 enum bfa_rport_event event
);
293 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s
*rp
,
294 enum bfa_rport_event event
);
295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s
*rp
,
296 enum bfa_rport_event event
);
299 * PLOG related definitions
302 plkd_validate_logrec(struct bfa_plog_rec_s
*pl_rec
)
304 if ((pl_rec
->log_type
!= BFA_PL_LOG_TYPE_INT
) &&
305 (pl_rec
->log_type
!= BFA_PL_LOG_TYPE_STRING
))
308 if ((pl_rec
->log_type
!= BFA_PL_LOG_TYPE_INT
) &&
309 (pl_rec
->log_num_ints
> BFA_PL_INT_LOG_SZ
))
316 bfa_plog_add(struct bfa_plog_s
*plog
, struct bfa_plog_rec_s
*pl_rec
)
319 struct bfa_plog_rec_s
*pl_recp
;
321 if (plog
->plog_enabled
== 0)
324 if (plkd_validate_logrec(pl_rec
)) {
331 pl_recp
= &(plog
->plog_recs
[tail
]);
333 memcpy(pl_recp
, pl_rec
, sizeof(struct bfa_plog_rec_s
));
335 pl_recp
->tv
= bfa_os_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog
->tail
);
338 if (plog
->head
== plog
->tail
)
339 BFA_PL_LOG_REC_INCR(plog
->head
);
343 bfa_plog_init(struct bfa_plog_s
*plog
)
345 memset((char *)plog
, 0, sizeof(struct bfa_plog_s
));
347 memcpy(plog
->plog_sig
, BFA_PL_SIG_STR
, BFA_PL_SIG_LEN
);
348 plog
->head
= plog
->tail
= 0;
349 plog
->plog_enabled
= 1;
353 bfa_plog_str(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
354 enum bfa_plog_eid event
,
355 u16 misc
, char *log_str
)
357 struct bfa_plog_rec_s lp
;
359 if (plog
->plog_enabled
) {
360 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
363 lp
.log_type
= BFA_PL_LOG_TYPE_STRING
;
365 strncpy(lp
.log_entry
.string_log
, log_str
,
366 BFA_PL_STRING_LOG_SZ
- 1);
367 lp
.log_entry
.string_log
[BFA_PL_STRING_LOG_SZ
- 1] = '\0';
368 bfa_plog_add(plog
, &lp
);
373 bfa_plog_intarr(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
374 enum bfa_plog_eid event
,
375 u16 misc
, u32
*intarr
, u32 num_ints
)
377 struct bfa_plog_rec_s lp
;
380 if (num_ints
> BFA_PL_INT_LOG_SZ
)
381 num_ints
= BFA_PL_INT_LOG_SZ
;
383 if (plog
->plog_enabled
) {
384 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
387 lp
.log_type
= BFA_PL_LOG_TYPE_INT
;
390 for (i
= 0; i
< num_ints
; i
++)
391 lp
.log_entry
.int_log
[i
] = intarr
[i
];
393 lp
.log_num_ints
= (u8
) num_ints
;
395 bfa_plog_add(plog
, &lp
);
400 bfa_plog_fchdr(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
401 enum bfa_plog_eid event
,
402 u16 misc
, struct fchs_s
*fchdr
)
404 struct bfa_plog_rec_s lp
;
405 u32
*tmp_int
= (u32
*) fchdr
;
406 u32 ints
[BFA_PL_INT_LOG_SZ
];
408 if (plog
->plog_enabled
) {
409 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
411 ints
[0] = tmp_int
[0];
412 ints
[1] = tmp_int
[1];
413 ints
[2] = tmp_int
[4];
415 bfa_plog_intarr(plog
, mid
, event
, misc
, ints
, 3);
420 bfa_plog_fchdr_and_pl(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
421 enum bfa_plog_eid event
, u16 misc
, struct fchs_s
*fchdr
,
424 struct bfa_plog_rec_s lp
;
425 u32
*tmp_int
= (u32
*) fchdr
;
426 u32 ints
[BFA_PL_INT_LOG_SZ
];
428 if (plog
->plog_enabled
) {
429 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
431 ints
[0] = tmp_int
[0];
432 ints
[1] = tmp_int
[1];
433 ints
[2] = tmp_int
[4];
436 bfa_plog_intarr(plog
, mid
, event
, misc
, ints
, 4);
442 bfa_plog_get_setting(struct bfa_plog_s
*plog
)
444 return (bfa_boolean_t
)plog
->plog_enabled
;
448 * fcxp_pvt BFA FCXP private functions
452 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s
*mod
, struct bfa_meminfo_s
*mi
)
458 dm_kva
= bfa_meminfo_dma_virt(mi
);
459 dm_pa
= bfa_meminfo_dma_phys(mi
);
461 buf_pool_sz
= mod
->req_pld_sz
* mod
->num_fcxps
;
464 * Initialize the fcxp req payload list
466 mod
->req_pld_list_kva
= dm_kva
;
467 mod
->req_pld_list_pa
= dm_pa
;
468 dm_kva
+= buf_pool_sz
;
469 dm_pa
+= buf_pool_sz
;
470 memset(mod
->req_pld_list_kva
, 0, buf_pool_sz
);
473 * Initialize the fcxp rsp payload list
475 buf_pool_sz
= mod
->rsp_pld_sz
* mod
->num_fcxps
;
476 mod
->rsp_pld_list_kva
= dm_kva
;
477 mod
->rsp_pld_list_pa
= dm_pa
;
478 dm_kva
+= buf_pool_sz
;
479 dm_pa
+= buf_pool_sz
;
480 memset(mod
->rsp_pld_list_kva
, 0, buf_pool_sz
);
482 bfa_meminfo_dma_virt(mi
) = dm_kva
;
483 bfa_meminfo_dma_phys(mi
) = dm_pa
;
487 claim_fcxps_mem(struct bfa_fcxp_mod_s
*mod
, struct bfa_meminfo_s
*mi
)
490 struct bfa_fcxp_s
*fcxp
;
492 fcxp
= (struct bfa_fcxp_s
*) bfa_meminfo_kva(mi
);
493 memset(fcxp
, 0, sizeof(struct bfa_fcxp_s
) * mod
->num_fcxps
);
495 INIT_LIST_HEAD(&mod
->fcxp_free_q
);
496 INIT_LIST_HEAD(&mod
->fcxp_active_q
);
498 mod
->fcxp_list
= fcxp
;
500 for (i
= 0; i
< mod
->num_fcxps
; i
++) {
501 fcxp
->fcxp_mod
= mod
;
504 list_add_tail(&fcxp
->qe
, &mod
->fcxp_free_q
);
505 bfa_reqq_winit(&fcxp
->reqq_wqe
, bfa_fcxp_qresume
, fcxp
);
506 fcxp
->reqq_waiting
= BFA_FALSE
;
511 bfa_meminfo_kva(mi
) = (void *)fcxp
;
515 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
518 u16 num_fcxp_reqs
= cfg
->fwcfg
.num_fcxp_reqs
;
520 if (num_fcxp_reqs
== 0)
524 * Account for req/rsp payload
526 *dm_len
+= BFA_FCXP_MAX_IBUF_SZ
* num_fcxp_reqs
;
527 if (cfg
->drvcfg
.min_cfg
)
528 *dm_len
+= BFA_FCXP_MAX_IBUF_SZ
* num_fcxp_reqs
;
530 *dm_len
+= BFA_FCXP_MAX_LBUF_SZ
* num_fcxp_reqs
;
533 * Account for fcxp structs
535 *ndm_len
+= sizeof(struct bfa_fcxp_s
) * num_fcxp_reqs
;
539 bfa_fcxp_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
540 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
542 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
544 memset(mod
, 0, sizeof(struct bfa_fcxp_mod_s
));
546 mod
->num_fcxps
= cfg
->fwcfg
.num_fcxp_reqs
;
549 * Initialize FCXP request and response payload sizes.
551 mod
->req_pld_sz
= mod
->rsp_pld_sz
= BFA_FCXP_MAX_IBUF_SZ
;
552 if (!cfg
->drvcfg
.min_cfg
)
553 mod
->rsp_pld_sz
= BFA_FCXP_MAX_LBUF_SZ
;
555 INIT_LIST_HEAD(&mod
->wait_q
);
557 claim_fcxp_req_rsp_mem(mod
, meminfo
);
558 claim_fcxps_mem(mod
, meminfo
);
/* Module detach hook: nothing to tear down for FCXP. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/* Module start hook: no per-start work for FCXP. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/* Module stop hook: no per-stop work for FCXP. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
577 bfa_fcxp_iocdisable(struct bfa_s
*bfa
)
579 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
580 struct bfa_fcxp_s
*fcxp
;
581 struct list_head
*qe
, *qen
;
583 list_for_each_safe(qe
, qen
, &mod
->fcxp_active_q
) {
584 fcxp
= (struct bfa_fcxp_s
*) qe
;
585 if (fcxp
->caller
== NULL
) {
586 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
587 BFA_STATUS_IOC_FAILURE
, 0, 0, NULL
);
590 fcxp
->rsp_status
= BFA_STATUS_IOC_FAILURE
;
591 bfa_cb_queue(bfa
, &fcxp
->hcb_qe
,
592 __bfa_fcxp_send_cbfn
, fcxp
);
597 static struct bfa_fcxp_s
*
598 bfa_fcxp_get(struct bfa_fcxp_mod_s
*fm
)
600 struct bfa_fcxp_s
*fcxp
;
602 bfa_q_deq(&fm
->fcxp_free_q
, &fcxp
);
605 list_add_tail(&fcxp
->qe
, &fm
->fcxp_active_q
);
611 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s
*fcxp
,
615 bfa_fcxp_get_sgaddr_t
*r_sga_cbfn
,
616 bfa_fcxp_get_sglen_t
*r_sglen_cbfn
,
617 struct list_head
*r_sgpg_q
,
619 bfa_fcxp_get_sgaddr_t sga_cbfn
,
620 bfa_fcxp_get_sglen_t sglen_cbfn
)
623 bfa_assert(bfa
!= NULL
);
625 bfa_trc(bfa
, fcxp
->fcxp_tag
);
630 bfa_assert(*sga_cbfn
!= NULL
);
631 bfa_assert(*sglen_cbfn
!= NULL
);
634 *r_sga_cbfn
= sga_cbfn
;
635 *r_sglen_cbfn
= sglen_cbfn
;
640 * alloc required sgpgs
642 if (n_sgles
> BFI_SGE_INLINE
)
649 bfa_fcxp_init(struct bfa_fcxp_s
*fcxp
,
650 void *caller
, struct bfa_s
*bfa
, int nreq_sgles
,
651 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
652 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
653 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
654 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
657 bfa_assert(bfa
!= NULL
);
659 bfa_trc(bfa
, fcxp
->fcxp_tag
);
661 fcxp
->caller
= caller
;
663 bfa_fcxp_init_reqrsp(fcxp
, bfa
,
664 &fcxp
->use_ireqbuf
, &fcxp
->nreq_sgles
, &fcxp
->req_sga_cbfn
,
665 &fcxp
->req_sglen_cbfn
, &fcxp
->req_sgpg_q
,
666 nreq_sgles
, req_sga_cbfn
, req_sglen_cbfn
);
668 bfa_fcxp_init_reqrsp(fcxp
, bfa
,
669 &fcxp
->use_irspbuf
, &fcxp
->nrsp_sgles
, &fcxp
->rsp_sga_cbfn
,
670 &fcxp
->rsp_sglen_cbfn
, &fcxp
->rsp_sgpg_q
,
671 nrsp_sgles
, rsp_sga_cbfn
, rsp_sglen_cbfn
);
676 bfa_fcxp_put(struct bfa_fcxp_s
*fcxp
)
678 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
679 struct bfa_fcxp_wqe_s
*wqe
;
681 bfa_q_deq(&mod
->wait_q
, &wqe
);
683 bfa_trc(mod
->bfa
, fcxp
->fcxp_tag
);
685 bfa_fcxp_init(fcxp
, wqe
->caller
, wqe
->bfa
, wqe
->nreq_sgles
,
686 wqe
->nrsp_sgles
, wqe
->req_sga_cbfn
,
687 wqe
->req_sglen_cbfn
, wqe
->rsp_sga_cbfn
,
688 wqe
->rsp_sglen_cbfn
);
690 wqe
->alloc_cbfn(wqe
->alloc_cbarg
, fcxp
);
694 bfa_assert(bfa_q_is_on_q(&mod
->fcxp_active_q
, fcxp
));
696 list_add_tail(&fcxp
->qe
, &mod
->fcxp_free_q
);
700 bfa_fcxp_null_comp(void *bfad_fcxp
, struct bfa_fcxp_s
*fcxp
, void *cbarg
,
701 bfa_status_t req_status
, u32 rsp_len
,
702 u32 resid_len
, struct fchs_s
*rsp_fchs
)
704 /* discarded fcxp completion */
708 __bfa_fcxp_send_cbfn(void *cbarg
, bfa_boolean_t complete
)
710 struct bfa_fcxp_s
*fcxp
= cbarg
;
713 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
714 fcxp
->rsp_status
, fcxp
->rsp_len
,
715 fcxp
->residue_len
, &fcxp
->rsp_fchs
);
722 hal_fcxp_send_comp(struct bfa_s
*bfa
, struct bfi_fcxp_send_rsp_s
*fcxp_rsp
)
724 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
725 struct bfa_fcxp_s
*fcxp
;
726 u16 fcxp_tag
= be16_to_cpu(fcxp_rsp
->fcxp_tag
);
728 bfa_trc(bfa
, fcxp_tag
);
730 fcxp_rsp
->rsp_len
= be32_to_cpu(fcxp_rsp
->rsp_len
);
733 * @todo f/w should not set residue to non-0 when everything
736 if (fcxp_rsp
->req_status
== BFA_STATUS_OK
)
737 fcxp_rsp
->residue_len
= 0;
739 fcxp_rsp
->residue_len
= be32_to_cpu(fcxp_rsp
->residue_len
);
741 fcxp
= BFA_FCXP_FROM_TAG(mod
, fcxp_tag
);
743 bfa_assert(fcxp
->send_cbfn
!= NULL
);
745 hal_fcxp_rx_plog(mod
->bfa
, fcxp
, fcxp_rsp
);
747 if (fcxp
->send_cbfn
!= NULL
) {
748 bfa_trc(mod
->bfa
, (NULL
== fcxp
->caller
));
749 if (fcxp
->caller
== NULL
) {
750 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
751 fcxp_rsp
->req_status
, fcxp_rsp
->rsp_len
,
752 fcxp_rsp
->residue_len
, &fcxp_rsp
->fchs
);
754 * fcxp automatically freed on return from the callback
758 fcxp
->rsp_status
= fcxp_rsp
->req_status
;
759 fcxp
->rsp_len
= fcxp_rsp
->rsp_len
;
760 fcxp
->residue_len
= fcxp_rsp
->residue_len
;
761 fcxp
->rsp_fchs
= fcxp_rsp
->fchs
;
763 bfa_cb_queue(bfa
, &fcxp
->hcb_qe
,
764 __bfa_fcxp_send_cbfn
, fcxp
);
767 bfa_trc(bfa
, (NULL
== fcxp
->send_cbfn
));
772 hal_fcxp_set_local_sges(struct bfi_sge_s
*sge
, u32 reqlen
, u64 req_pa
)
774 union bfi_addr_u sga_zero
= { {0} };
776 sge
->sg_len
= reqlen
;
777 sge
->flags
= BFI_SGE_DATA_LAST
;
778 bfa_dma_addr_set(sge
[0].sga
, req_pa
);
783 sge
->sg_len
= reqlen
;
784 sge
->flags
= BFI_SGE_PGDLEN
;
789 hal_fcxp_tx_plog(struct bfa_s
*bfa
, u32 reqlen
, struct bfa_fcxp_s
*fcxp
,
796 if (fcxp
->use_ireqbuf
) {
798 *((u32
*) BFA_FCXP_REQ_PLD(fcxp
));
800 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
802 reqlen
+ sizeof(struct fchs_s
), fchs
,
805 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
807 reqlen
+ sizeof(struct fchs_s
),
811 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
, BFA_PL_EID_TX
,
812 reqlen
+ sizeof(struct fchs_s
), fchs
);
817 hal_fcxp_rx_plog(struct bfa_s
*bfa
, struct bfa_fcxp_s
*fcxp
,
818 struct bfi_fcxp_send_rsp_s
*fcxp_rsp
)
820 if (fcxp_rsp
->rsp_len
> 0) {
821 if (fcxp
->use_irspbuf
) {
823 *((u32
*) BFA_FCXP_RSP_PLD(fcxp
));
825 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
827 (u16
) fcxp_rsp
->rsp_len
,
828 &fcxp_rsp
->fchs
, pld_w0
);
830 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
832 (u16
) fcxp_rsp
->rsp_len
,
836 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
, BFA_PL_EID_RX
,
837 (u16
) fcxp_rsp
->rsp_len
, &fcxp_rsp
->fchs
);
842 * Handler to resume sending fcxp when space in available in cpe queue.
845 bfa_fcxp_qresume(void *cbarg
)
847 struct bfa_fcxp_s
*fcxp
= cbarg
;
848 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
849 struct bfi_fcxp_send_req_s
*send_req
;
851 fcxp
->reqq_waiting
= BFA_FALSE
;
852 send_req
= bfa_reqq_next(bfa
, BFA_REQQ_FCXP
);
853 bfa_fcxp_queue(fcxp
, send_req
);
857 * Queue fcxp send request to foimrware.
860 bfa_fcxp_queue(struct bfa_fcxp_s
*fcxp
, struct bfi_fcxp_send_req_s
*send_req
)
862 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
863 struct bfa_fcxp_req_info_s
*reqi
= &fcxp
->req_info
;
864 struct bfa_fcxp_rsp_info_s
*rspi
= &fcxp
->rsp_info
;
865 struct bfa_rport_s
*rport
= reqi
->bfa_rport
;
867 bfi_h2i_set(send_req
->mh
, BFI_MC_FCXP
, BFI_FCXP_H2I_SEND_REQ
,
870 send_req
->fcxp_tag
= cpu_to_be16(fcxp
->fcxp_tag
);
872 send_req
->rport_fw_hndl
= rport
->fw_handle
;
873 send_req
->max_frmsz
= cpu_to_be16(rport
->rport_info
.max_frmsz
);
874 if (send_req
->max_frmsz
== 0)
875 send_req
->max_frmsz
= cpu_to_be16(FC_MAX_PDUSZ
);
877 send_req
->rport_fw_hndl
= 0;
878 send_req
->max_frmsz
= cpu_to_be16(FC_MAX_PDUSZ
);
881 send_req
->vf_id
= cpu_to_be16(reqi
->vf_id
);
882 send_req
->lp_tag
= reqi
->lp_tag
;
883 send_req
->class = reqi
->class;
884 send_req
->rsp_timeout
= rspi
->rsp_timeout
;
885 send_req
->cts
= reqi
->cts
;
886 send_req
->fchs
= reqi
->fchs
;
888 send_req
->req_len
= cpu_to_be32(reqi
->req_tot_len
);
889 send_req
->rsp_maxlen
= cpu_to_be32(rspi
->rsp_maxlen
);
894 if (fcxp
->use_ireqbuf
== 1) {
895 hal_fcxp_set_local_sges(send_req
->req_sge
, reqi
->req_tot_len
,
896 BFA_FCXP_REQ_PLD_PA(fcxp
));
898 if (fcxp
->nreq_sgles
> 0) {
899 bfa_assert(fcxp
->nreq_sgles
== 1);
900 hal_fcxp_set_local_sges(send_req
->req_sge
,
902 fcxp
->req_sga_cbfn(fcxp
->caller
,
905 bfa_assert(reqi
->req_tot_len
== 0);
906 hal_fcxp_set_local_sges(send_req
->rsp_sge
, 0, 0);
913 if (fcxp
->use_irspbuf
== 1) {
914 bfa_assert(rspi
->rsp_maxlen
<= BFA_FCXP_MAX_LBUF_SZ
);
916 hal_fcxp_set_local_sges(send_req
->rsp_sge
, rspi
->rsp_maxlen
,
917 BFA_FCXP_RSP_PLD_PA(fcxp
));
920 if (fcxp
->nrsp_sgles
> 0) {
921 bfa_assert(fcxp
->nrsp_sgles
== 1);
922 hal_fcxp_set_local_sges(send_req
->rsp_sge
,
924 fcxp
->rsp_sga_cbfn(fcxp
->caller
,
927 bfa_assert(rspi
->rsp_maxlen
== 0);
928 hal_fcxp_set_local_sges(send_req
->rsp_sge
, 0, 0);
932 hal_fcxp_tx_plog(bfa
, reqi
->req_tot_len
, fcxp
, &reqi
->fchs
);
934 bfa_reqq_produce(bfa
, BFA_REQQ_FCXP
);
936 bfa_trc(bfa
, bfa_reqq_pi(bfa
, BFA_REQQ_FCXP
));
937 bfa_trc(bfa
, bfa_reqq_ci(bfa
, BFA_REQQ_FCXP
));
941 * hal_fcxp_api BFA FCXP API
945 * Allocate an FCXP instance to send a response or to send a request
946 * that has a response. Request/response buffers are allocated by caller.
948 * @param[in] bfa BFA bfa instance
949 * @param[in] nreq_sgles Number of SG elements required for request
950 * buffer. 0, if fcxp internal buffers are used.
951 * Use bfa_fcxp_get_reqbuf() to get the
952 * internal req buffer.
953 * @param[in] req_sgles SG elements describing request buffer. Will be
954 * copied in by BFA and hence can be freed on
955 * return from this function.
956 * @param[in] get_req_sga function ptr to be called to get a request SG
957 * Address (given the sge index).
958 * @param[in] get_req_sglen function ptr to be called to get a request SG
959 * len (given the sge index).
960 * @param[in] get_rsp_sga function ptr to be called to get a response SG
961 * Address (given the sge index).
962 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
963 * len (given the sge index).
965 * @return FCXP instance. NULL on failure.
968 bfa_fcxp_alloc(void *caller
, struct bfa_s
*bfa
, int nreq_sgles
,
969 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
970 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
971 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
972 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
974 struct bfa_fcxp_s
*fcxp
= NULL
;
976 bfa_assert(bfa
!= NULL
);
978 fcxp
= bfa_fcxp_get(BFA_FCXP_MOD(bfa
));
982 bfa_trc(bfa
, fcxp
->fcxp_tag
);
984 bfa_fcxp_init(fcxp
, caller
, bfa
, nreq_sgles
, nrsp_sgles
, req_sga_cbfn
,
985 req_sglen_cbfn
, rsp_sga_cbfn
, rsp_sglen_cbfn
);
991 * Get the internal request buffer pointer
993 * @param[in] fcxp BFA fcxp pointer
995 * @return pointer to the internal request buffer
998 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s
*fcxp
)
1000 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1003 bfa_assert(fcxp
->use_ireqbuf
== 1);
1004 reqbuf
= ((u8
*)mod
->req_pld_list_kva
) +
1005 fcxp
->fcxp_tag
* mod
->req_pld_sz
;
1010 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s
*fcxp
)
1012 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1014 return mod
->req_pld_sz
;
1018 * Get the internal response buffer pointer
1020 * @param[in] fcxp BFA fcxp pointer
1022 * @return pointer to the internal request buffer
1025 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s
*fcxp
)
1027 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1030 bfa_assert(fcxp
->use_irspbuf
== 1);
1032 rspbuf
= ((u8
*)mod
->rsp_pld_list_kva
) +
1033 fcxp
->fcxp_tag
* mod
->rsp_pld_sz
;
1040 * @param[in] fcxp BFA fcxp pointer
1045 bfa_fcxp_free(struct bfa_fcxp_s
*fcxp
)
1047 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1049 bfa_assert(fcxp
!= NULL
);
1050 bfa_trc(mod
->bfa
, fcxp
->fcxp_tag
);
1055 * Send a FCXP request
1057 * @param[in] fcxp BFA fcxp pointer
1058 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1059 * @param[in] vf_id virtual Fabric ID
1060 * @param[in] lp_tag lport tag
1061 * @param[in] cts use Continous sequence
1062 * @param[in] cos fc Class of Service
1063 * @param[in] reqlen request length, does not include FCHS length
1064 * @param[in] fchs fc Header Pointer. The header content will be copied
1067 * @param[in] cbfn call back function to be called on receiving
1069 * @param[in] cbarg arg for cbfn
1070 * @param[in] rsp_timeout
1073 * @return bfa_status_t
1076 bfa_fcxp_send(struct bfa_fcxp_s
*fcxp
, struct bfa_rport_s
*rport
,
1077 u16 vf_id
, u8 lp_tag
, bfa_boolean_t cts
, enum fc_cos cos
,
1078 u32 reqlen
, struct fchs_s
*fchs
, bfa_cb_fcxp_send_t cbfn
,
1079 void *cbarg
, u32 rsp_maxlen
, u8 rsp_timeout
)
1081 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
1082 struct bfa_fcxp_req_info_s
*reqi
= &fcxp
->req_info
;
1083 struct bfa_fcxp_rsp_info_s
*rspi
= &fcxp
->rsp_info
;
1084 struct bfi_fcxp_send_req_s
*send_req
;
1086 bfa_trc(bfa
, fcxp
->fcxp_tag
);
1089 * setup request/response info
1091 reqi
->bfa_rport
= rport
;
1092 reqi
->vf_id
= vf_id
;
1093 reqi
->lp_tag
= lp_tag
;
1095 rspi
->rsp_timeout
= rsp_timeout
;
1098 reqi
->req_tot_len
= reqlen
;
1099 rspi
->rsp_maxlen
= rsp_maxlen
;
1100 fcxp
->send_cbfn
= cbfn
? cbfn
: bfa_fcxp_null_comp
;
1101 fcxp
->send_cbarg
= cbarg
;
1104 * If no room in CPE queue, wait for space in request queue
1106 send_req
= bfa_reqq_next(bfa
, BFA_REQQ_FCXP
);
1108 bfa_trc(bfa
, fcxp
->fcxp_tag
);
1109 fcxp
->reqq_waiting
= BFA_TRUE
;
1110 bfa_reqq_wait(bfa
, BFA_REQQ_FCXP
, &fcxp
->reqq_wqe
);
1114 bfa_fcxp_queue(fcxp
, send_req
);
1120 * @param[in] fcxp BFA fcxp pointer
1125 bfa_fcxp_abort(struct bfa_fcxp_s
*fcxp
)
1127 bfa_trc(fcxp
->fcxp_mod
->bfa
, fcxp
->fcxp_tag
);
1129 return BFA_STATUS_OK
;
1133 bfa_fcxp_alloc_wait(struct bfa_s
*bfa
, struct bfa_fcxp_wqe_s
*wqe
,
1134 bfa_fcxp_alloc_cbfn_t alloc_cbfn
, void *alloc_cbarg
,
1135 void *caller
, int nreq_sgles
,
1136 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
1137 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
1138 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
1139 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
1141 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1143 bfa_assert(list_empty(&mod
->fcxp_free_q
));
1145 wqe
->alloc_cbfn
= alloc_cbfn
;
1146 wqe
->alloc_cbarg
= alloc_cbarg
;
1147 wqe
->caller
= caller
;
1149 wqe
->nreq_sgles
= nreq_sgles
;
1150 wqe
->nrsp_sgles
= nrsp_sgles
;
1151 wqe
->req_sga_cbfn
= req_sga_cbfn
;
1152 wqe
->req_sglen_cbfn
= req_sglen_cbfn
;
1153 wqe
->rsp_sga_cbfn
= rsp_sga_cbfn
;
1154 wqe
->rsp_sglen_cbfn
= rsp_sglen_cbfn
;
1156 list_add_tail(&wqe
->qe
, &mod
->wait_q
);
1160 bfa_fcxp_walloc_cancel(struct bfa_s
*bfa
, struct bfa_fcxp_wqe_s
*wqe
)
1162 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1164 bfa_assert(bfa_q_is_on_q(&mod
->wait_q
, wqe
));
1169 bfa_fcxp_discard(struct bfa_fcxp_s
*fcxp
)
1172 * If waiting for room in request queue, cancel reqq wait
1175 if (fcxp
->reqq_waiting
) {
1176 fcxp
->reqq_waiting
= BFA_FALSE
;
1177 bfa_reqq_wcancel(&fcxp
->reqq_wqe
);
1178 bfa_fcxp_free(fcxp
);
1182 fcxp
->send_cbfn
= bfa_fcxp_null_comp
;
1188 * hal_fcxp_public BFA FCXP public functions
1192 bfa_fcxp_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
1194 switch (msg
->mhdr
.msg_id
) {
1195 case BFI_FCXP_I2H_SEND_RSP
:
1196 hal_fcxp_send_comp(bfa
, (struct bfi_fcxp_send_rsp_s
*) msg
);
1200 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
1206 bfa_fcxp_get_maxrsp(struct bfa_s
*bfa
)
1208 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1210 return mod
->rsp_pld_sz
;
/*
 * BFA LPS state machine functions
 */

/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Queue or send the login depending on reqq space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Nothing logged in; complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Room opened in the request queue -- send the login now. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete -- port is online
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Queue or send the logout depending on reqq space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
 * lps_pvt BFA LPS private functions
 */

/*
 * return memory requirement
 */
static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
	u32 *dm_len)
{
	/* min_cfg trims the lport pool to the minimum supported count. */
	if (cfg->drvcfg.min_cfg)
		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
	else
		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
}

/*
 * bfa module attach at initialization time
 */
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	memset(mod, 0, sizeof(struct bfa_lps_mod_s));
	mod->num_lps = BFA_LPS_MAX_LPORTS;
	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	/* Carve the lps array out of the pre-allocated KVA region. */
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);

	bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->lp_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}

/* No per-module teardown work is needed. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}

static void
bfa_lps_start(struct bfa_s *bfa)
{
}

static void
bfa_lps_stop(struct bfa_s *bfa)
{
}

/*
 * IOC in disabled state -- consider all lps offline
 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head	*qe, *qen;

	/* Safe iteration: the OFFLINE event may move lps between queues. */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
}
/*
 * Firmware login response
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Cache the fabric-assigned login parameters. */
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Keep LS_RJT reason/explanation for the caller to query. */
		lps->lsrjt_rsn	= rsp->lsrjt_rsn;
		lps->lsrjt_expl	= rsp->lsrjt_expl;
		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware logout response
 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware received a Clear virtual link request (for FCoE)
 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}

/*
 * Space is available in request queue, resume queueing request to firmware.
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	/* Move the lps from the active list back to the free pool. */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}
/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	/* Caller guarantees queue space (checked by the state machine). */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag	= lps->lp_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s	*m;

	/* Caller guarantees queue space (checked by the state machine). */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag	= lps->lp_tag;
	m->port_name	= lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
/*
 * Indirect login completion handler for non-fcs
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s	*lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Login completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		/* Non-fcs callers get the completion via the callback queue. */
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Indirect logout completion handler for non-fcs
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s	*lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Logout completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Clear virtual link completion handler for non-fcs
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s	*lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * Received Clear virtual link event --direct call for fcs,
 * queue for others
 */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
/*
 * lps_public BFA LPS public functions
 */

/* Maximum vports depends on ASIC generation (CT vs. CB). */
int
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
		return BFA_LPS_MAX_VPORTS_SUPP_CT;
	else
		return BFA_LPS_MAX_VPORTS_SUPP_CB;
}

/*
 * Allocate a lport srvice tag.
 */
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}

/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}

/*
 * Initiate a lport login.
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport fdisc login.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;	/* FDISC never uses loop ALPA */
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;	/* authentication not used on FDISC */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport FDSIC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

/*
 * Discard a pending login request -- should be called only for
 * link down handling.
 */
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
/*
 * NOTE(review): return types in this accessor group were reconstructed from
 * field usage in bfa_lps_login_rsp() -- confirm against the module header.
 */

/*
 * Return lport services tag
 */
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
	return lps->lp_tag;
}

/*
 * Return lport services tag given the pid
 */
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
		if (lps->lp_pid == pid)
			return lps->lp_tag;
	}

	/* Return base port tag anyway */
	return 0;
}

/*
 * return if fabric login indicates support for NPIV
 */
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
	return lps->npiv_en;
}

/*
 * Return TRUE if attached to F-Port, else return FALSE
 */
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
	return lps->fport;
}

/*
 * Return TRUE if attached to a Brocade Fabric
 */
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
	return lps->brcd_switch;
}

/*
 * return TRUE if authentication is required
 */
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
	return lps->auth_req;
}

/* Extended status reported on BFA_STATUS_EPROTOCOL login failures. */
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
	return lps->ext_status;
}

/*
 * return port id assigned to the lport
 */
u32
bfa_lps_get_pid(struct bfa_lps_s *lps)
{
	return lps->lp_pid;
}

/*
 * return port id assigned to the base lport
 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}

/*
 * Return bb_credit assigned in FLOGI response
 */
u16
bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
{
	return lps->pr_bbcred;
}

/*
 * Return peer port name
 */
wwn_t
bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
{
	return lps->pr_pwwn;
}

/*
 * Return peer node name
 */
wwn_t
bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
{
	return lps->pr_nwwn;
}

/*
 * return reason code if login request is rejected
 */
u8
bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
{
	return lps->lsrjt_rsn;
}

/*
 * return explanation code if login request is rejected
 */
u8
bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
{
	return lps->lsrjt_expl;
}

/*
 * Return fpma/spma MAC for lport
 */
mac_t
bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
{
	return lps->lp_mac;
}
/*
 * LPS firmware message class handler.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message id from firmware -- trace and assert. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
/*
 * FC PORT state machine functions
 */

/*
 * Uninitialized state -- waiting for the START event after IOC bring-up.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port enable requested, waiting for request-queue space.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port enable sent to firmware -- awaiting response or link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is enabled; link is down -- waiting for a link-up event.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			/* FCoE mode: record FIP FCF discovery outcome. */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port is enabled and link is up -- normal operating state.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Deliberate disable is INFO; unexpected loss is ERR. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port disable requested, waiting for request-queue space.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is queued -> disable then enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Disable-then-enable toggle pending, waiting for request-queue space.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		break;

	case BFA_FCPORT_SM_DISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port disable sent to firmware -- awaiting response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port is disabled -- waiting for an enable or stop request.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

/*
 * Port is stopped -- only a START event restarts it.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}

/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}

/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
/*
 * Link-notification state machine: serializes link up/down callbacks to the
 * upper layer; "*_nf" states are waiting for the previous notification to
 * complete before delivering the next one.
 */

/*
 * Link state is down
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down delivered; now deliver the pending up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is up
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; now deliver the pending down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2788 __bfa_cb_fcport_event(void *cbarg
, bfa_boolean_t complete
)
2790 struct bfa_fcport_ln_s
*ln
= cbarg
;
2793 ln
->fcport
->event_cbfn(ln
->fcport
->event_cbarg
, ln
->ln_event
);
2795 bfa_sm_send_event(ln
, BFA_FCPORT_LN_SM_NOTIFICATION
);
2799 * Send SCN notification to upper layers.
2800 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2803 bfa_fcport_scn(struct bfa_fcport_s
*fcport
, enum bfa_port_linkstate event
,
2804 bfa_boolean_t trunk
)
2806 if (fcport
->cfg
.trunked
&& !trunk
)
2810 case BFA_PORT_LINKUP
:
2811 bfa_sm_send_event(&fcport
->ln
, BFA_FCPORT_LN_SM_LINKUP
);
2813 case BFA_PORT_LINKDOWN
:
2814 bfa_sm_send_event(&fcport
->ln
, BFA_FCPORT_LN_SM_LINKDOWN
);
2822 bfa_fcport_queue_cb(struct bfa_fcport_ln_s
*ln
, enum bfa_port_linkstate event
)
2824 struct bfa_fcport_s
*fcport
= ln
->fcport
;
2826 if (fcport
->bfa
->fcs
) {
2827 fcport
->event_cbfn(fcport
->event_cbarg
, event
);
2828 bfa_sm_send_event(ln
, BFA_FCPORT_LN_SM_NOTIFICATION
);
2830 ln
->ln_event
= event
;
2831 bfa_cb_queue(fcport
->bfa
, &ln
->ln_qe
,
2832 __bfa_cb_fcport_event
, ln
);
/* DMA block size for port statistics, rounded up to the DMA alignment */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
					 BFA_DMA_ALIGN_SZ))
2840 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
2843 *dm_len
+= FCPORT_STATS_DMA_SZ
;
2847 bfa_fcport_qresume(void *cbarg
)
2849 struct bfa_fcport_s
*fcport
= cbarg
;
2851 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_QRESUME
);
2855 bfa_fcport_mem_claim(struct bfa_fcport_s
*fcport
, struct bfa_meminfo_s
*meminfo
)
2860 dm_kva
= bfa_meminfo_dma_virt(meminfo
);
2861 dm_pa
= bfa_meminfo_dma_phys(meminfo
);
2863 fcport
->stats_kva
= dm_kva
;
2864 fcport
->stats_pa
= dm_pa
;
2865 fcport
->stats
= (union bfa_fcport_stats_u
*) dm_kva
;
2867 dm_kva
+= FCPORT_STATS_DMA_SZ
;
2868 dm_pa
+= FCPORT_STATS_DMA_SZ
;
2870 bfa_meminfo_dma_virt(meminfo
) = dm_kva
;
2871 bfa_meminfo_dma_phys(meminfo
) = dm_pa
;
2875 * Memory initialization.
2878 bfa_fcport_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
2879 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
2881 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
2882 struct bfa_port_cfg_s
*port_cfg
= &fcport
->cfg
;
2883 struct bfa_fcport_ln_s
*ln
= &fcport
->ln
;
2884 struct bfa_timeval_s tv
;
2886 memset(fcport
, 0, sizeof(struct bfa_fcport_s
));
2888 ln
->fcport
= fcport
;
2890 bfa_fcport_mem_claim(fcport
, meminfo
);
2892 bfa_sm_set_state(fcport
, bfa_fcport_sm_uninit
);
2893 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn
);
2896 * initialize time stamp for stats reset
2898 bfa_os_gettimeofday(&tv
);
2899 fcport
->stats_reset_time
= tv
.tv_sec
;
2902 * initialize and set default configuration
2904 port_cfg
->topology
= BFA_PORT_TOPOLOGY_P2P
;
2905 port_cfg
->speed
= BFA_PORT_SPEED_AUTO
;
2906 port_cfg
->trunked
= BFA_FALSE
;
2907 port_cfg
->maxfrsize
= 0;
2909 port_cfg
->trl_def_speed
= BFA_PORT_SPEED_1GBPS
;
2911 bfa_reqq_winit(&fcport
->reqq_wait
, bfa_fcport_qresume
, fcport
);
/*
 * Module detach; nothing to release for the FC port module.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2920 * Called when IOC is ready.
2923 bfa_fcport_start(struct bfa_s
*bfa
)
2925 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_START
);
2929 * Called before IOC is stopped.
2932 bfa_fcport_stop(struct bfa_s
*bfa
)
2934 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_STOP
);
2935 bfa_trunk_iocdisable(bfa
);
2939 * Called when IOC failure is detected.
2942 bfa_fcport_iocdisable(struct bfa_s
*bfa
)
2944 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
2946 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_HWFAIL
);
2947 bfa_trunk_iocdisable(bfa
);
2951 bfa_fcport_update_linkinfo(struct bfa_fcport_s
*fcport
)
2953 struct bfi_fcport_event_s
*pevent
= fcport
->event_arg
.i2hmsg
.event
;
2954 struct bfa_fcport_trunk_s
*trunk
= &fcport
->trunk
;
2956 fcport
->speed
= pevent
->link_state
.speed
;
2957 fcport
->topology
= pevent
->link_state
.topology
;
2959 if (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
)
2963 fcport
->qos_attr
= pevent
->link_state
.qos_attr
;
2964 fcport
->qos_vc_attr
= pevent
->link_state
.vc_fcf
.qos_vc_attr
;
2967 * update trunk state if applicable
2969 if (!fcport
->cfg
.trunked
)
2970 trunk
->attr
.state
= BFA_TRUNK_DISABLED
;
2972 /* update FCoE specific */
2973 fcport
->fcoe_vlan
= be16_to_cpu(pevent
->link_state
.vc_fcf
.fcf
.vlan
);
2975 bfa_trc(fcport
->bfa
, fcport
->speed
);
2976 bfa_trc(fcport
->bfa
, fcport
->topology
);
2980 bfa_fcport_reset_linkinfo(struct bfa_fcport_s
*fcport
)
2982 fcport
->speed
= BFA_PORT_SPEED_UNKNOWN
;
2983 fcport
->topology
= BFA_PORT_TOPOLOGY_NONE
;
2987 * Send port enable message to firmware.
2989 static bfa_boolean_t
2990 bfa_fcport_send_enable(struct bfa_fcport_s
*fcport
)
2992 struct bfi_fcport_enable_req_s
*m
;
2995 * Increment message tag before queue check, so that responses to old
2996 * requests are discarded.
3001 * check for room in queue to send request now
3003 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3005 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3006 &fcport
->reqq_wait
);
3010 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_ENABLE_REQ
,
3011 bfa_lpuid(fcport
->bfa
));
3012 m
->nwwn
= fcport
->nwwn
;
3013 m
->pwwn
= fcport
->pwwn
;
3014 m
->port_cfg
= fcport
->cfg
;
3015 m
->msgtag
= fcport
->msgtag
;
3016 m
->port_cfg
.maxfrsize
= cpu_to_be16(fcport
->cfg
.maxfrsize
);
3017 bfa_dma_be_addr_set(m
->stats_dma_addr
, fcport
->stats_pa
);
3018 bfa_trc(fcport
->bfa
, m
->stats_dma_addr
.a32
.addr_lo
);
3019 bfa_trc(fcport
->bfa
, m
->stats_dma_addr
.a32
.addr_hi
);
3022 * queue I/O message to firmware
3024 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3029 * Send port disable message to firmware.
3031 static bfa_boolean_t
3032 bfa_fcport_send_disable(struct bfa_fcport_s
*fcport
)
3034 struct bfi_fcport_req_s
*m
;
3037 * Increment message tag before queue check, so that responses to old
3038 * requests are discarded.
3043 * check for room in queue to send request now
3045 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3047 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3048 &fcport
->reqq_wait
);
3052 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_DISABLE_REQ
,
3053 bfa_lpuid(fcport
->bfa
));
3054 m
->msgtag
= fcport
->msgtag
;
3057 * queue I/O message to firmware
3059 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3065 bfa_fcport_set_wwns(struct bfa_fcport_s
*fcport
)
3067 fcport
->pwwn
= bfa_ioc_get_pwwn(&fcport
->bfa
->ioc
);
3068 fcport
->nwwn
= bfa_ioc_get_nwwn(&fcport
->bfa
->ioc
);
3070 bfa_trc(fcport
->bfa
, fcport
->pwwn
);
3071 bfa_trc(fcport
->bfa
, fcport
->nwwn
);
3075 bfa_fcport_send_txcredit(void *port_cbarg
)
3078 struct bfa_fcport_s
*fcport
= port_cbarg
;
3079 struct bfi_fcport_set_svc_params_req_s
*m
;
3082 * check for room in queue to send request now
3084 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3086 bfa_trc(fcport
->bfa
, fcport
->cfg
.tx_bbcredit
);
3090 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
,
3091 bfa_lpuid(fcport
->bfa
));
3092 m
->tx_bbcredit
= cpu_to_be16((u16
)fcport
->cfg
.tx_bbcredit
);
3095 * queue I/O message to firmware
3097 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3101 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s
*d
,
3102 struct bfa_qos_stats_s
*s
)
3104 u32
*dip
= (u32
*) d
;
3105 __be32
*sip
= (__be32
*) s
;
3108 /* Now swap the 32 bit fields */
3109 for (i
= 0; i
< (sizeof(struct bfa_qos_stats_s
)/sizeof(u32
)); ++i
)
3110 dip
[i
] = be32_to_cpu(sip
[i
]);
3114 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s
*d
,
3115 struct bfa_fcoe_stats_s
*s
)
3117 u32
*dip
= (u32
*) d
;
3118 __be32
*sip
= (__be32
*) s
;
3121 for (i
= 0; i
< ((sizeof(struct bfa_fcoe_stats_s
))/sizeof(u32
));
3124 dip
[i
] = be32_to_cpu(sip
[i
]);
3125 dip
[i
+ 1] = be32_to_cpu(sip
[i
+ 1]);
3127 dip
[i
] = be32_to_cpu(sip
[i
+ 1]);
3128 dip
[i
+ 1] = be32_to_cpu(sip
[i
]);
3134 __bfa_cb_fcport_stats_get(void *cbarg
, bfa_boolean_t complete
)
3136 struct bfa_fcport_s
*fcport
= cbarg
;
3139 if (fcport
->stats_status
== BFA_STATUS_OK
) {
3140 struct bfa_timeval_s tv
;
3142 /* Swap FC QoS or FCoE stats */
3143 if (bfa_ioc_get_fcmode(&fcport
->bfa
->ioc
)) {
3144 bfa_fcport_qos_stats_swap(
3145 &fcport
->stats_ret
->fcqos
,
3146 &fcport
->stats
->fcqos
);
3148 bfa_fcport_fcoe_stats_swap(
3149 &fcport
->stats_ret
->fcoe
,
3150 &fcport
->stats
->fcoe
);
3152 bfa_os_gettimeofday(&tv
);
3153 fcport
->stats_ret
->fcoe
.secs_reset
=
3154 tv
.tv_sec
- fcport
->stats_reset_time
;
3157 fcport
->stats_cbfn(fcport
->stats_cbarg
, fcport
->stats_status
);
3159 fcport
->stats_busy
= BFA_FALSE
;
3160 fcport
->stats_status
= BFA_STATUS_OK
;
3165 bfa_fcport_stats_get_timeout(void *cbarg
)
3167 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3169 bfa_trc(fcport
->bfa
, fcport
->stats_qfull
);
3171 if (fcport
->stats_qfull
) {
3172 bfa_reqq_wcancel(&fcport
->stats_reqq_wait
);
3173 fcport
->stats_qfull
= BFA_FALSE
;
3176 fcport
->stats_status
= BFA_STATUS_ETIMER
;
3177 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
, __bfa_cb_fcport_stats_get
,
3182 bfa_fcport_send_stats_get(void *cbarg
)
3184 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3185 struct bfi_fcport_req_s
*msg
;
3187 msg
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3190 fcport
->stats_qfull
= BFA_TRUE
;
3191 bfa_reqq_winit(&fcport
->stats_reqq_wait
,
3192 bfa_fcport_send_stats_get
, fcport
);
3193 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3194 &fcport
->stats_reqq_wait
);
3197 fcport
->stats_qfull
= BFA_FALSE
;
3199 memset(msg
, 0, sizeof(struct bfi_fcport_req_s
));
3200 bfi_h2i_set(msg
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_STATS_GET_REQ
,
3201 bfa_lpuid(fcport
->bfa
));
3202 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3206 __bfa_cb_fcport_stats_clr(void *cbarg
, bfa_boolean_t complete
)
3208 struct bfa_fcport_s
*fcport
= cbarg
;
3211 struct bfa_timeval_s tv
;
3214 * re-initialize time stamp for stats reset
3216 bfa_os_gettimeofday(&tv
);
3217 fcport
->stats_reset_time
= tv
.tv_sec
;
3219 fcport
->stats_cbfn(fcport
->stats_cbarg
, fcport
->stats_status
);
3221 fcport
->stats_busy
= BFA_FALSE
;
3222 fcport
->stats_status
= BFA_STATUS_OK
;
3227 bfa_fcport_stats_clr_timeout(void *cbarg
)
3229 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3231 bfa_trc(fcport
->bfa
, fcport
->stats_qfull
);
3233 if (fcport
->stats_qfull
) {
3234 bfa_reqq_wcancel(&fcport
->stats_reqq_wait
);
3235 fcport
->stats_qfull
= BFA_FALSE
;
3238 fcport
->stats_status
= BFA_STATUS_ETIMER
;
3239 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3240 __bfa_cb_fcport_stats_clr
, fcport
);
3244 bfa_fcport_send_stats_clear(void *cbarg
)
3246 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3247 struct bfi_fcport_req_s
*msg
;
3249 msg
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3252 fcport
->stats_qfull
= BFA_TRUE
;
3253 bfa_reqq_winit(&fcport
->stats_reqq_wait
,
3254 bfa_fcport_send_stats_clear
, fcport
);
3255 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3256 &fcport
->stats_reqq_wait
);
3259 fcport
->stats_qfull
= BFA_FALSE
;
3261 memset(msg
, 0, sizeof(struct bfi_fcport_req_s
));
3262 bfi_h2i_set(msg
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_STATS_CLEAR_REQ
,
3263 bfa_lpuid(fcport
->bfa
));
3264 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3268 * Handle trunk SCN event from firmware.
3271 bfa_trunk_scn(struct bfa_fcport_s
*fcport
, struct bfi_fcport_trunk_scn_s
*scn
)
3273 struct bfa_fcport_trunk_s
*trunk
= &fcport
->trunk
;
3274 struct bfi_fcport_trunk_link_s
*tlink
;
3275 struct bfa_trunk_link_attr_s
*lattr
;
3276 enum bfa_trunk_state state_prev
;
3280 bfa_trc(fcport
->bfa
, fcport
->cfg
.trunked
);
3281 bfa_assert(scn
->trunk_state
== BFA_TRUNK_ONLINE
||
3282 scn
->trunk_state
== BFA_TRUNK_OFFLINE
);
3284 bfa_trc(fcport
->bfa
, trunk
->attr
.state
);
3285 bfa_trc(fcport
->bfa
, scn
->trunk_state
);
3286 bfa_trc(fcport
->bfa
, scn
->trunk_speed
);
3289 * Save off new state for trunk attribute query
3291 state_prev
= trunk
->attr
.state
;
3292 if (fcport
->cfg
.trunked
&& (trunk
->attr
.state
!= BFA_TRUNK_DISABLED
))
3293 trunk
->attr
.state
= scn
->trunk_state
;
3294 trunk
->attr
.speed
= scn
->trunk_speed
;
3295 for (i
= 0; i
< BFA_TRUNK_MAX_PORTS
; i
++) {
3296 lattr
= &trunk
->attr
.link_attr
[i
];
3297 tlink
= &scn
->tlink
[i
];
3299 lattr
->link_state
= tlink
->state
;
3300 lattr
->trunk_wwn
= tlink
->trunk_wwn
;
3301 lattr
->fctl
= tlink
->fctl
;
3302 lattr
->speed
= tlink
->speed
;
3303 lattr
->deskew
= be32_to_cpu(tlink
->deskew
);
3305 if (tlink
->state
== BFA_TRUNK_LINK_STATE_UP
) {
3306 fcport
->speed
= tlink
->speed
;
3307 fcport
->topology
= BFA_PORT_TOPOLOGY_P2P
;
3311 bfa_trc(fcport
->bfa
, lattr
->link_state
);
3312 bfa_trc(fcport
->bfa
, lattr
->trunk_wwn
);
3313 bfa_trc(fcport
->bfa
, lattr
->fctl
);
3314 bfa_trc(fcport
->bfa
, lattr
->speed
);
3315 bfa_trc(fcport
->bfa
, lattr
->deskew
);
3320 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3321 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(0,1)");
3324 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3325 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(-,1)");
3328 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3329 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(0,-)");
3332 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3333 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk down");
3337 * Notify upper layers if trunk state changed.
3339 if ((state_prev
!= trunk
->attr
.state
) ||
3340 (scn
->trunk_state
== BFA_TRUNK_OFFLINE
)) {
3341 bfa_fcport_scn(fcport
, (scn
->trunk_state
== BFA_TRUNK_ONLINE
) ?
3342 BFA_PORT_LINKUP
: BFA_PORT_LINKDOWN
, BFA_TRUE
);
3347 bfa_trunk_iocdisable(struct bfa_s
*bfa
)
3349 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3353 * In trunked mode, notify upper layers that link is down
3355 if (fcport
->cfg
.trunked
) {
3356 if (fcport
->trunk
.attr
.state
== BFA_TRUNK_ONLINE
)
3357 bfa_fcport_scn(fcport
, BFA_PORT_LINKDOWN
, BFA_TRUE
);
3359 fcport
->trunk
.attr
.state
= BFA_TRUNK_OFFLINE
;
3360 fcport
->trunk
.attr
.speed
= BFA_PORT_SPEED_UNKNOWN
;
3361 for (i
= 0; i
< BFA_TRUNK_MAX_PORTS
; i
++) {
3362 fcport
->trunk
.attr
.link_attr
[i
].trunk_wwn
= 0;
3363 fcport
->trunk
.attr
.link_attr
[i
].fctl
=
3364 BFA_TRUNK_LINK_FCTL_NORMAL
;
3365 fcport
->trunk
.attr
.link_attr
[i
].link_state
=
3366 BFA_TRUNK_LINK_STATE_DN_LINKDN
;
3367 fcport
->trunk
.attr
.link_attr
[i
].speed
=
3368 BFA_PORT_SPEED_UNKNOWN
;
3369 fcport
->trunk
.attr
.link_attr
[i
].deskew
= 0;
3381 * Called to initialize port attributes
3384 bfa_fcport_init(struct bfa_s
*bfa
)
3386 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3389 * Initialize port attributes from IOC hardware data.
3391 bfa_fcport_set_wwns(fcport
);
3392 if (fcport
->cfg
.maxfrsize
== 0)
3393 fcport
->cfg
.maxfrsize
= bfa_ioc_maxfrsize(&bfa
->ioc
);
3394 fcport
->cfg
.rx_bbcredit
= bfa_ioc_rx_bbcredit(&bfa
->ioc
);
3395 fcport
->speed_sup
= bfa_ioc_speed_sup(&bfa
->ioc
);
3397 bfa_assert(fcport
->cfg
.maxfrsize
);
3398 bfa_assert(fcport
->cfg
.rx_bbcredit
);
3399 bfa_assert(fcport
->speed_sup
);
3403 * Firmware message handler.
3406 bfa_fcport_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
3408 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3409 union bfi_fcport_i2h_msg_u i2hmsg
;
3412 fcport
->event_arg
.i2hmsg
= i2hmsg
;
3414 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
3415 bfa_trc(bfa
, bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
));
3417 switch (msg
->mhdr
.msg_id
) {
3418 case BFI_FCPORT_I2H_ENABLE_RSP
:
3419 if (fcport
->msgtag
== i2hmsg
.penable_rsp
->msgtag
)
3420 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_FWRSP
);
3423 case BFI_FCPORT_I2H_DISABLE_RSP
:
3424 if (fcport
->msgtag
== i2hmsg
.penable_rsp
->msgtag
)
3425 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_FWRSP
);
3428 case BFI_FCPORT_I2H_EVENT
:
3429 if (i2hmsg
.event
->link_state
.linkstate
== BFA_PORT_LINKUP
)
3430 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_LINKUP
);
3432 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_LINKDOWN
);
3435 case BFI_FCPORT_I2H_TRUNK_SCN
:
3436 bfa_trunk_scn(fcport
, i2hmsg
.trunk_scn
);
3439 case BFI_FCPORT_I2H_STATS_GET_RSP
:
3441 * check for timer pop before processing the rsp
3443 if (fcport
->stats_busy
== BFA_FALSE
||
3444 fcport
->stats_status
== BFA_STATUS_ETIMER
)
3447 bfa_timer_stop(&fcport
->timer
);
3448 fcport
->stats_status
= i2hmsg
.pstatsget_rsp
->status
;
3449 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3450 __bfa_cb_fcport_stats_get
, fcport
);
3453 case BFI_FCPORT_I2H_STATS_CLEAR_RSP
:
3455 * check for timer pop before processing the rsp
3457 if (fcport
->stats_busy
== BFA_FALSE
||
3458 fcport
->stats_status
== BFA_STATUS_ETIMER
)
3461 bfa_timer_stop(&fcport
->timer
);
3462 fcport
->stats_status
= BFA_STATUS_OK
;
3463 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3464 __bfa_cb_fcport_stats_clr
, fcport
);
3467 case BFI_FCPORT_I2H_ENABLE_AEN
:
3468 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_ENABLE
);
3471 case BFI_FCPORT_I2H_DISABLE_AEN
:
3472 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_DISABLE
);
3488 * Registered callback for port events.
3491 bfa_fcport_event_register(struct bfa_s
*bfa
,
3492 void (*cbfn
) (void *cbarg
,
3493 enum bfa_port_linkstate event
),
3496 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3498 fcport
->event_cbfn
= cbfn
;
3499 fcport
->event_cbarg
= cbarg
;
3503 bfa_fcport_enable(struct bfa_s
*bfa
)
3505 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3507 if (bfa_ioc_is_disabled(&bfa
->ioc
))
3508 return BFA_STATUS_IOC_DISABLED
;
3510 if (fcport
->diag_busy
)
3511 return BFA_STATUS_DIAG_BUSY
;
3513 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_ENABLE
);
3514 return BFA_STATUS_OK
;
3518 bfa_fcport_disable(struct bfa_s
*bfa
)
3521 if (bfa_ioc_is_disabled(&bfa
->ioc
))
3522 return BFA_STATUS_IOC_DISABLED
;
3524 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_DISABLE
);
3525 return BFA_STATUS_OK
;
3529 * Configure port speed.
3532 bfa_fcport_cfg_speed(struct bfa_s
*bfa
, enum bfa_port_speed speed
)
3534 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3536 bfa_trc(bfa
, speed
);
3538 if (fcport
->cfg
.trunked
== BFA_TRUE
)
3539 return BFA_STATUS_TRUNK_ENABLED
;
3540 if ((speed
!= BFA_PORT_SPEED_AUTO
) && (speed
> fcport
->speed_sup
)) {
3541 bfa_trc(bfa
, fcport
->speed_sup
);
3542 return BFA_STATUS_UNSUPP_SPEED
;
3545 fcport
->cfg
.speed
= speed
;
3547 return BFA_STATUS_OK
;
3551 * Get current speed.
3554 bfa_fcport_get_speed(struct bfa_s
*bfa
)
3556 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3558 return fcport
->speed
;
3562 * Configure port topology.
3565 bfa_fcport_cfg_topology(struct bfa_s
*bfa
, enum bfa_port_topology topology
)
3567 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3569 bfa_trc(bfa
, topology
);
3570 bfa_trc(bfa
, fcport
->cfg
.topology
);
3573 case BFA_PORT_TOPOLOGY_P2P
:
3574 case BFA_PORT_TOPOLOGY_LOOP
:
3575 case BFA_PORT_TOPOLOGY_AUTO
:
3579 return BFA_STATUS_EINVAL
;
3582 fcport
->cfg
.topology
= topology
;
3583 return BFA_STATUS_OK
;
3587 * Get current topology.
3589 enum bfa_port_topology
3590 bfa_fcport_get_topology(struct bfa_s
*bfa
)
3592 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3594 return fcport
->topology
;
3598 bfa_fcport_cfg_hardalpa(struct bfa_s
*bfa
, u8 alpa
)
3600 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3603 bfa_trc(bfa
, fcport
->cfg
.cfg_hardalpa
);
3604 bfa_trc(bfa
, fcport
->cfg
.hardalpa
);
3606 fcport
->cfg
.cfg_hardalpa
= BFA_TRUE
;
3607 fcport
->cfg
.hardalpa
= alpa
;
3609 return BFA_STATUS_OK
;
3613 bfa_fcport_clr_hardalpa(struct bfa_s
*bfa
)
3615 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3617 bfa_trc(bfa
, fcport
->cfg
.cfg_hardalpa
);
3618 bfa_trc(bfa
, fcport
->cfg
.hardalpa
);
3620 fcport
->cfg
.cfg_hardalpa
= BFA_FALSE
;
3621 return BFA_STATUS_OK
;
3625 bfa_fcport_get_hardalpa(struct bfa_s
*bfa
, u8
*alpa
)
3627 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3629 *alpa
= fcport
->cfg
.hardalpa
;
3630 return fcport
->cfg
.cfg_hardalpa
;
3634 bfa_fcport_get_myalpa(struct bfa_s
*bfa
)
3636 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3638 return fcport
->myalpa
;
3642 bfa_fcport_cfg_maxfrsize(struct bfa_s
*bfa
, u16 maxfrsize
)
3644 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3646 bfa_trc(bfa
, maxfrsize
);
3647 bfa_trc(bfa
, fcport
->cfg
.maxfrsize
);
3650 if ((maxfrsize
> FC_MAX_PDUSZ
) || (maxfrsize
< FC_MIN_PDUSZ
))
3651 return BFA_STATUS_INVLD_DFSZ
;
3653 /* power of 2, if not the max frame size of 2112 */
3654 if ((maxfrsize
!= FC_MAX_PDUSZ
) && (maxfrsize
& (maxfrsize
- 1)))
3655 return BFA_STATUS_INVLD_DFSZ
;
3657 fcport
->cfg
.maxfrsize
= maxfrsize
;
3658 return BFA_STATUS_OK
;
3662 bfa_fcport_get_maxfrsize(struct bfa_s
*bfa
)
3664 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3666 return fcport
->cfg
.maxfrsize
;
3670 bfa_fcport_get_rx_bbcredit(struct bfa_s
*bfa
)
3672 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3674 return fcport
->cfg
.rx_bbcredit
;
3678 bfa_fcport_set_tx_bbcredit(struct bfa_s
*bfa
, u16 tx_bbcredit
)
3680 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3682 fcport
->cfg
.tx_bbcredit
= (u8
)tx_bbcredit
;
3683 bfa_fcport_send_txcredit(fcport
);
3687 * Get port attributes.
3691 bfa_fcport_get_wwn(struct bfa_s
*bfa
, bfa_boolean_t node
)
3693 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3695 return fcport
->nwwn
;
3697 return fcport
->pwwn
;
3701 bfa_fcport_get_attr(struct bfa_s
*bfa
, struct bfa_port_attr_s
*attr
)
3703 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3705 memset(attr
, 0, sizeof(struct bfa_port_attr_s
));
3707 attr
->nwwn
= fcport
->nwwn
;
3708 attr
->pwwn
= fcport
->pwwn
;
3710 attr
->factorypwwn
= bfa_ioc_get_mfg_pwwn(&bfa
->ioc
);
3711 attr
->factorynwwn
= bfa_ioc_get_mfg_nwwn(&bfa
->ioc
);
3713 memcpy(&attr
->pport_cfg
, &fcport
->cfg
,
3714 sizeof(struct bfa_port_cfg_s
));
3715 /* speed attributes */
3716 attr
->pport_cfg
.speed
= fcport
->cfg
.speed
;
3717 attr
->speed_supported
= fcport
->speed_sup
;
3718 attr
->speed
= fcport
->speed
;
3719 attr
->cos_supported
= FC_CLASS_3
;
3721 /* topology attributes */
3722 attr
->pport_cfg
.topology
= fcport
->cfg
.topology
;
3723 attr
->topology
= fcport
->topology
;
3724 attr
->pport_cfg
.trunked
= fcport
->cfg
.trunked
;
3726 /* beacon attributes */
3727 attr
->beacon
= fcport
->beacon
;
3728 attr
->link_e2e_beacon
= fcport
->link_e2e_beacon
;
3729 attr
->plog_enabled
= bfa_plog_get_setting(fcport
->bfa
->plog
);
3730 attr
->io_profile
= bfa_fcpim_get_io_profile(fcport
->bfa
);
3732 attr
->pport_cfg
.path_tov
= bfa_fcpim_path_tov_get(bfa
);
3733 attr
->pport_cfg
.q_depth
= bfa_fcpim_qdepth_get(bfa
);
3734 attr
->port_state
= bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
);
3735 if (bfa_ioc_is_disabled(&fcport
->bfa
->ioc
))
3736 attr
->port_state
= BFA_PORT_ST_IOCDIS
;
3737 else if (bfa_ioc_fw_mismatch(&fcport
->bfa
->ioc
))
3738 attr
->port_state
= BFA_PORT_ST_FWMISMATCH
;
3741 attr
->fcoe_vlan
= fcport
->fcoe_vlan
;
/* timeout for stats get/clear firmware responses — assumed msecs; TODO confirm */
#define BFA_FCPORT_STATS_TOV	1000
3747 * Fetch port statistics (FCQoS or FCoE).
3750 bfa_fcport_get_stats(struct bfa_s
*bfa
, union bfa_fcport_stats_u
*stats
,
3751 bfa_cb_port_t cbfn
, void *cbarg
)
3753 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3755 if (fcport
->stats_busy
) {
3756 bfa_trc(bfa
, fcport
->stats_busy
);
3757 return BFA_STATUS_DEVBUSY
;
3760 fcport
->stats_busy
= BFA_TRUE
;
3761 fcport
->stats_ret
= stats
;
3762 fcport
->stats_cbfn
= cbfn
;
3763 fcport
->stats_cbarg
= cbarg
;
3765 bfa_fcport_send_stats_get(fcport
);
3767 bfa_timer_start(bfa
, &fcport
->timer
, bfa_fcport_stats_get_timeout
,
3768 fcport
, BFA_FCPORT_STATS_TOV
);
3769 return BFA_STATUS_OK
;
3773 * Reset port statistics (FCQoS or FCoE).
3776 bfa_fcport_clear_stats(struct bfa_s
*bfa
, bfa_cb_port_t cbfn
, void *cbarg
)
3778 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3780 if (fcport
->stats_busy
) {
3781 bfa_trc(bfa
, fcport
->stats_busy
);
3782 return BFA_STATUS_DEVBUSY
;
3785 fcport
->stats_busy
= BFA_TRUE
;
3786 fcport
->stats_cbfn
= cbfn
;
3787 fcport
->stats_cbarg
= cbarg
;
3789 bfa_fcport_send_stats_clear(fcport
);
3791 bfa_timer_start(bfa
, &fcport
->timer
, bfa_fcport_stats_clr_timeout
,
3792 fcport
, BFA_FCPORT_STATS_TOV
);
3793 return BFA_STATUS_OK
;
3798 * Fetch port attributes.
3801 bfa_fcport_is_disabled(struct bfa_s
*bfa
)
3803 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3805 return bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
) ==
3806 BFA_PORT_ST_DISABLED
;
3811 bfa_fcport_is_ratelim(struct bfa_s
*bfa
)
3813 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3815 return fcport
->cfg
.ratelimit
? BFA_TRUE
: BFA_FALSE
;
3821 * Get default minimum ratelim speed
3824 bfa_fcport_get_ratelim_speed(struct bfa_s
*bfa
)
3826 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3828 bfa_trc(bfa
, fcport
->cfg
.trl_def_speed
);
3829 return fcport
->cfg
.trl_def_speed
;
3834 bfa_fcport_is_linkup(struct bfa_s
*bfa
)
3836 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3838 return (!fcport
->cfg
.trunked
&&
3839 bfa_sm_cmp_state(fcport
, bfa_fcport_sm_linkup
)) ||
3840 (fcport
->cfg
.trunked
&&
3841 fcport
->trunk
.attr
.state
== BFA_TRUNK_ONLINE
);
3845 bfa_fcport_is_qos_enabled(struct bfa_s
*bfa
)
3847 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3849 return fcport
->cfg
.qos_enabled
;
3853 * Rport State machine functions
3856 * Beginning state, only online event expected.
3859 bfa_rport_sm_uninit(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3861 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3862 bfa_trc(rp
->bfa
, event
);
3865 case BFA_RPORT_SM_CREATE
:
3866 bfa_stats(rp
, sm_un_cr
);
3867 bfa_sm_set_state(rp
, bfa_rport_sm_created
);
3871 bfa_stats(rp
, sm_un_unexp
);
3872 bfa_sm_fault(rp
->bfa
, event
);
3877 bfa_rport_sm_created(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3879 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3880 bfa_trc(rp
->bfa
, event
);
3883 case BFA_RPORT_SM_ONLINE
:
3884 bfa_stats(rp
, sm_cr_on
);
3885 if (bfa_rport_send_fwcreate(rp
))
3886 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
3888 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
3891 case BFA_RPORT_SM_DELETE
:
3892 bfa_stats(rp
, sm_cr_del
);
3893 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
3897 case BFA_RPORT_SM_HWFAIL
:
3898 bfa_stats(rp
, sm_cr_hwf
);
3899 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3903 bfa_stats(rp
, sm_cr_unexp
);
3904 bfa_sm_fault(rp
->bfa
, event
);
3909 * Waiting for rport create response from firmware.
3912 bfa_rport_sm_fwcreate(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3914 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3915 bfa_trc(rp
->bfa
, event
);
3918 case BFA_RPORT_SM_FWRSP
:
3919 bfa_stats(rp
, sm_fwc_rsp
);
3920 bfa_sm_set_state(rp
, bfa_rport_sm_online
);
3921 bfa_rport_online_cb(rp
);
3924 case BFA_RPORT_SM_DELETE
:
3925 bfa_stats(rp
, sm_fwc_del
);
3926 bfa_sm_set_state(rp
, bfa_rport_sm_delete_pending
);
3929 case BFA_RPORT_SM_OFFLINE
:
3930 bfa_stats(rp
, sm_fwc_off
);
3931 bfa_sm_set_state(rp
, bfa_rport_sm_offline_pending
);
3934 case BFA_RPORT_SM_HWFAIL
:
3935 bfa_stats(rp
, sm_fwc_hwf
);
3936 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3940 bfa_stats(rp
, sm_fwc_unexp
);
3941 bfa_sm_fault(rp
->bfa
, event
);
3946 * Request queue is full, awaiting queue resume to send create request.
3949 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3951 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3952 bfa_trc(rp
->bfa
, event
);
3955 case BFA_RPORT_SM_QRESUME
:
3956 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
3957 bfa_rport_send_fwcreate(rp
);
3960 case BFA_RPORT_SM_DELETE
:
3961 bfa_stats(rp
, sm_fwc_del
);
3962 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
3963 bfa_reqq_wcancel(&rp
->reqq_wait
);
3967 case BFA_RPORT_SM_OFFLINE
:
3968 bfa_stats(rp
, sm_fwc_off
);
3969 bfa_sm_set_state(rp
, bfa_rport_sm_offline
);
3970 bfa_reqq_wcancel(&rp
->reqq_wait
);
3971 bfa_rport_offline_cb(rp
);
3974 case BFA_RPORT_SM_HWFAIL
:
3975 bfa_stats(rp
, sm_fwc_hwf
);
3976 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3977 bfa_reqq_wcancel(&rp
->reqq_wait
);
3981 bfa_stats(rp
, sm_fwc_unexp
);
3982 bfa_sm_fault(rp
->bfa
, event
);
3987 * Online state - normal parking state.
3990 bfa_rport_sm_online(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3992 struct bfi_rport_qos_scn_s
*qos_scn
;
3994 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3995 bfa_trc(rp
->bfa
, event
);
3998 case BFA_RPORT_SM_OFFLINE
:
3999 bfa_stats(rp
, sm_on_off
);
4000 if (bfa_rport_send_fwdelete(rp
))
4001 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
4003 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete_qfull
);
4006 case BFA_RPORT_SM_DELETE
:
4007 bfa_stats(rp
, sm_on_del
);
4008 if (bfa_rport_send_fwdelete(rp
))
4009 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4011 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
4014 case BFA_RPORT_SM_HWFAIL
:
4015 bfa_stats(rp
, sm_on_hwf
);
4016 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4019 case BFA_RPORT_SM_SET_SPEED
:
4020 bfa_rport_send_fwspeed(rp
);
4023 case BFA_RPORT_SM_QOS_SCN
:
4024 qos_scn
= (struct bfi_rport_qos_scn_s
*) rp
->event_arg
.fw_msg
;
4025 rp
->qos_attr
= qos_scn
->new_qos_attr
;
4026 bfa_trc(rp
->bfa
, qos_scn
->old_qos_attr
.qos_flow_id
);
4027 bfa_trc(rp
->bfa
, qos_scn
->new_qos_attr
.qos_flow_id
);
4028 bfa_trc(rp
->bfa
, qos_scn
->old_qos_attr
.qos_priority
);
4029 bfa_trc(rp
->bfa
, qos_scn
->new_qos_attr
.qos_priority
);
4031 qos_scn
->old_qos_attr
.qos_flow_id
=
4032 be32_to_cpu(qos_scn
->old_qos_attr
.qos_flow_id
);
4033 qos_scn
->new_qos_attr
.qos_flow_id
=
4034 be32_to_cpu(qos_scn
->new_qos_attr
.qos_flow_id
);
4036 if (qos_scn
->old_qos_attr
.qos_flow_id
!=
4037 qos_scn
->new_qos_attr
.qos_flow_id
)
4038 bfa_cb_rport_qos_scn_flowid(rp
->rport_drv
,
4039 qos_scn
->old_qos_attr
,
4040 qos_scn
->new_qos_attr
);
4041 if (qos_scn
->old_qos_attr
.qos_priority
!=
4042 qos_scn
->new_qos_attr
.qos_priority
)
4043 bfa_cb_rport_qos_scn_prio(rp
->rport_drv
,
4044 qos_scn
->old_qos_attr
,
4045 qos_scn
->new_qos_attr
);
4049 bfa_stats(rp
, sm_on_unexp
);
4050 bfa_sm_fault(rp
->bfa
, event
);
4055 * Firmware rport is being deleted - awaiting f/w response.
4058 bfa_rport_sm_fwdelete(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4060 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4061 bfa_trc(rp
->bfa
, event
);
4064 case BFA_RPORT_SM_FWRSP
:
4065 bfa_stats(rp
, sm_fwd_rsp
);
4066 bfa_sm_set_state(rp
, bfa_rport_sm_offline
);
4067 bfa_rport_offline_cb(rp
);
4070 case BFA_RPORT_SM_DELETE
:
4071 bfa_stats(rp
, sm_fwd_del
);
4072 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4075 case BFA_RPORT_SM_HWFAIL
:
4076 bfa_stats(rp
, sm_fwd_hwf
);
4077 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4078 bfa_rport_offline_cb(rp
);
4082 bfa_stats(rp
, sm_fwd_unexp
);
4083 bfa_sm_fault(rp
->bfa
, event
);
4088 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4090 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4091 bfa_trc(rp
->bfa
, event
);
4094 case BFA_RPORT_SM_QRESUME
:
4095 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
4096 bfa_rport_send_fwdelete(rp
);
4099 case BFA_RPORT_SM_DELETE
:
4100 bfa_stats(rp
, sm_fwd_del
);
4101 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
4104 case BFA_RPORT_SM_HWFAIL
:
4105 bfa_stats(rp
, sm_fwd_hwf
);
4106 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4107 bfa_reqq_wcancel(&rp
->reqq_wait
);
4108 bfa_rport_offline_cb(rp
);
4112 bfa_stats(rp
, sm_fwd_unexp
);
4113 bfa_sm_fault(rp
->bfa
, event
);
4121 bfa_rport_sm_offline(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4123 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4124 bfa_trc(rp
->bfa
, event
);
4127 case BFA_RPORT_SM_DELETE
:
4128 bfa_stats(rp
, sm_off_del
);
4129 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4133 case BFA_RPORT_SM_ONLINE
:
4134 bfa_stats(rp
, sm_off_on
);
4135 if (bfa_rport_send_fwcreate(rp
))
4136 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
4138 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
4141 case BFA_RPORT_SM_HWFAIL
:
4142 bfa_stats(rp
, sm_off_hwf
);
4143 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4147 bfa_stats(rp
, sm_off_unexp
);
4148 bfa_sm_fault(rp
->bfa
, event
);
4153 * Rport is deleted, waiting for firmware response to delete.
4156 bfa_rport_sm_deleting(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4158 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4159 bfa_trc(rp
->bfa
, event
);
4162 case BFA_RPORT_SM_FWRSP
:
4163 bfa_stats(rp
, sm_del_fwrsp
);
4164 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4168 case BFA_RPORT_SM_HWFAIL
:
4169 bfa_stats(rp
, sm_del_hwf
);
4170 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4175 bfa_sm_fault(rp
->bfa
, event
);
4180 bfa_rport_sm_deleting_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4182 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4183 bfa_trc(rp
->bfa
, event
);
4186 case BFA_RPORT_SM_QRESUME
:
4187 bfa_stats(rp
, sm_del_fwrsp
);
4188 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4189 bfa_rport_send_fwdelete(rp
);
4192 case BFA_RPORT_SM_HWFAIL
:
4193 bfa_stats(rp
, sm_del_hwf
);
4194 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4195 bfa_reqq_wcancel(&rp
->reqq_wait
);
4200 bfa_sm_fault(rp
->bfa
, event
);
4205 * Waiting for rport create response from firmware. A delete is pending.
4208 bfa_rport_sm_delete_pending(struct bfa_rport_s
*rp
,
4209 enum bfa_rport_event event
)
4211 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4212 bfa_trc(rp
->bfa
, event
);
4215 case BFA_RPORT_SM_FWRSP
:
4216 bfa_stats(rp
, sm_delp_fwrsp
);
4217 if (bfa_rport_send_fwdelete(rp
))
4218 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4220 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
4223 case BFA_RPORT_SM_HWFAIL
:
4224 bfa_stats(rp
, sm_delp_hwf
);
4225 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4230 bfa_stats(rp
, sm_delp_unexp
);
4231 bfa_sm_fault(rp
->bfa
, event
);
4236 * Waiting for rport create response from firmware. Rport offline is pending.
4239 bfa_rport_sm_offline_pending(struct bfa_rport_s
*rp
,
4240 enum bfa_rport_event event
)
4242 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4243 bfa_trc(rp
->bfa
, event
);
4246 case BFA_RPORT_SM_FWRSP
:
4247 bfa_stats(rp
, sm_offp_fwrsp
);
4248 if (bfa_rport_send_fwdelete(rp
))
4249 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
4251 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete_qfull
);
4254 case BFA_RPORT_SM_DELETE
:
4255 bfa_stats(rp
, sm_offp_del
);
4256 bfa_sm_set_state(rp
, bfa_rport_sm_delete_pending
);
4259 case BFA_RPORT_SM_HWFAIL
:
4260 bfa_stats(rp
, sm_offp_hwf
);
4261 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4265 bfa_stats(rp
, sm_offp_unexp
);
4266 bfa_sm_fault(rp
->bfa
, event
);
4274 bfa_rport_sm_iocdisable(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4276 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4277 bfa_trc(rp
->bfa
, event
);
4280 case BFA_RPORT_SM_OFFLINE
:
4281 bfa_stats(rp
, sm_iocd_off
);
4282 bfa_rport_offline_cb(rp
);
4285 case BFA_RPORT_SM_DELETE
:
4286 bfa_stats(rp
, sm_iocd_del
);
4287 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4291 case BFA_RPORT_SM_ONLINE
:
4292 bfa_stats(rp
, sm_iocd_on
);
4293 if (bfa_rport_send_fwcreate(rp
))
4294 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
4296 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
4299 case BFA_RPORT_SM_HWFAIL
:
4303 bfa_stats(rp
, sm_iocd_unexp
);
4304 bfa_sm_fault(rp
->bfa
, event
);
4311 * bfa_rport_private BFA rport private functions
4315 __bfa_cb_rport_online(void *cbarg
, bfa_boolean_t complete
)
4317 struct bfa_rport_s
*rp
= cbarg
;
4320 bfa_cb_rport_online(rp
->rport_drv
);
4324 __bfa_cb_rport_offline(void *cbarg
, bfa_boolean_t complete
)
4326 struct bfa_rport_s
*rp
= cbarg
;
4329 bfa_cb_rport_offline(rp
->rport_drv
);
4333 bfa_rport_qresume(void *cbarg
)
4335 struct bfa_rport_s
*rp
= cbarg
;
4337 bfa_sm_send_event(rp
, BFA_RPORT_SM_QRESUME
);
4341 bfa_rport_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
4344 if (cfg
->fwcfg
.num_rports
< BFA_RPORT_MIN
)
4345 cfg
->fwcfg
.num_rports
= BFA_RPORT_MIN
;
4347 *km_len
+= cfg
->fwcfg
.num_rports
* sizeof(struct bfa_rport_s
);
4351 bfa_rport_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4352 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
4354 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(bfa
);
4355 struct bfa_rport_s
*rp
;
4358 INIT_LIST_HEAD(&mod
->rp_free_q
);
4359 INIT_LIST_HEAD(&mod
->rp_active_q
);
4361 rp
= (struct bfa_rport_s
*) bfa_meminfo_kva(meminfo
);
4363 mod
->num_rports
= cfg
->fwcfg
.num_rports
;
4365 bfa_assert(mod
->num_rports
&&
4366 !(mod
->num_rports
& (mod
->num_rports
- 1)));
4368 for (i
= 0; i
< mod
->num_rports
; i
++, rp
++) {
4369 memset(rp
, 0, sizeof(struct bfa_rport_s
));
4372 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4378 list_add_tail(&rp
->qe
, &mod
->rp_free_q
);
4380 bfa_reqq_winit(&rp
->reqq_wait
, bfa_rport_qresume
, rp
);
4386 bfa_meminfo_kva(meminfo
) = (u8
*) rp
;
/* Module lifecycle hooks - nothing to do for the rport module. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}

static void
bfa_rport_start(struct bfa_s *bfa)
{
}

static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4405 bfa_rport_iocdisable(struct bfa_s
*bfa
)
4407 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(bfa
);
4408 struct bfa_rport_s
*rport
;
4409 struct list_head
*qe
, *qen
;
4411 list_for_each_safe(qe
, qen
, &mod
->rp_active_q
) {
4412 rport
= (struct bfa_rport_s
*) qe
;
4413 bfa_sm_send_event(rport
, BFA_RPORT_SM_HWFAIL
);
4417 static struct bfa_rport_s
*
4418 bfa_rport_alloc(struct bfa_rport_mod_s
*mod
)
4420 struct bfa_rport_s
*rport
;
4422 bfa_q_deq(&mod
->rp_free_q
, &rport
);
4424 list_add_tail(&rport
->qe
, &mod
->rp_active_q
);
4430 bfa_rport_free(struct bfa_rport_s
*rport
)
4432 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(rport
->bfa
);
4434 bfa_assert(bfa_q_is_on_q(&mod
->rp_active_q
, rport
));
4435 list_del(&rport
->qe
);
4436 list_add_tail(&rport
->qe
, &mod
->rp_free_q
);
4439 static bfa_boolean_t
4440 bfa_rport_send_fwcreate(struct bfa_rport_s
*rp
)
4442 struct bfi_rport_create_req_s
*m
;
4445 * check for room in queue to send request now
4447 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4449 bfa_reqq_wait(rp
->bfa
, BFA_REQQ_RPORT
, &rp
->reqq_wait
);
4453 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_CREATE_REQ
,
4454 bfa_lpuid(rp
->bfa
));
4455 m
->bfa_handle
= rp
->rport_tag
;
4456 m
->max_frmsz
= cpu_to_be16(rp
->rport_info
.max_frmsz
);
4457 m
->pid
= rp
->rport_info
.pid
;
4458 m
->lp_tag
= rp
->rport_info
.lp_tag
;
4459 m
->local_pid
= rp
->rport_info
.local_pid
;
4460 m
->fc_class
= rp
->rport_info
.fc_class
;
4461 m
->vf_en
= rp
->rport_info
.vf_en
;
4462 m
->vf_id
= rp
->rport_info
.vf_id
;
4463 m
->cisc
= rp
->rport_info
.cisc
;
4466 * queue I/O message to firmware
4468 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4472 static bfa_boolean_t
4473 bfa_rport_send_fwdelete(struct bfa_rport_s
*rp
)
4475 struct bfi_rport_delete_req_s
*m
;
4478 * check for room in queue to send request now
4480 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4482 bfa_reqq_wait(rp
->bfa
, BFA_REQQ_RPORT
, &rp
->reqq_wait
);
4486 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_DELETE_REQ
,
4487 bfa_lpuid(rp
->bfa
));
4488 m
->fw_handle
= rp
->fw_handle
;
4491 * queue I/O message to firmware
4493 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4497 static bfa_boolean_t
4498 bfa_rport_send_fwspeed(struct bfa_rport_s
*rp
)
4500 struct bfa_rport_speed_req_s
*m
;
4503 * check for room in queue to send request now
4505 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4507 bfa_trc(rp
->bfa
, rp
->rport_info
.speed
);
4511 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_SET_SPEED_REQ
,
4512 bfa_lpuid(rp
->bfa
));
4513 m
->fw_handle
= rp
->fw_handle
;
4514 m
->speed
= (u8
)rp
->rport_info
.speed
;
4517 * queue I/O message to firmware
4519 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4530 * Rport interrupt processing.
4533 bfa_rport_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
4535 union bfi_rport_i2h_msg_u msg
;
4536 struct bfa_rport_s
*rp
;
4538 bfa_trc(bfa
, m
->mhdr
.msg_id
);
4542 switch (m
->mhdr
.msg_id
) {
4543 case BFI_RPORT_I2H_CREATE_RSP
:
4544 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.create_rsp
->bfa_handle
);
4545 rp
->fw_handle
= msg
.create_rsp
->fw_handle
;
4546 rp
->qos_attr
= msg
.create_rsp
->qos_attr
;
4547 bfa_assert(msg
.create_rsp
->status
== BFA_STATUS_OK
);
4548 bfa_sm_send_event(rp
, BFA_RPORT_SM_FWRSP
);
4551 case BFI_RPORT_I2H_DELETE_RSP
:
4552 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.delete_rsp
->bfa_handle
);
4553 bfa_assert(msg
.delete_rsp
->status
== BFA_STATUS_OK
);
4554 bfa_sm_send_event(rp
, BFA_RPORT_SM_FWRSP
);
4557 case BFI_RPORT_I2H_QOS_SCN
:
4558 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.qos_scn_evt
->bfa_handle
);
4559 rp
->event_arg
.fw_msg
= msg
.qos_scn_evt
;
4560 bfa_sm_send_event(rp
, BFA_RPORT_SM_QOS_SCN
);
4564 bfa_trc(bfa
, m
->mhdr
.msg_id
);
4575 struct bfa_rport_s
*
4576 bfa_rport_create(struct bfa_s
*bfa
, void *rport_drv
)
4578 struct bfa_rport_s
*rp
;
4580 rp
= bfa_rport_alloc(BFA_RPORT_MOD(bfa
));
4586 rp
->rport_drv
= rport_drv
;
4587 bfa_rport_clear_stats(rp
);
4589 bfa_assert(bfa_sm_cmp_state(rp
, bfa_rport_sm_uninit
));
4590 bfa_sm_send_event(rp
, BFA_RPORT_SM_CREATE
);
4596 bfa_rport_delete(struct bfa_rport_s
*rport
)
4598 bfa_sm_send_event(rport
, BFA_RPORT_SM_DELETE
);
4602 bfa_rport_online(struct bfa_rport_s
*rport
, struct bfa_rport_info_s
*rport_info
)
4604 bfa_assert(rport_info
->max_frmsz
!= 0);
4607 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4608 * responses. Default to minimum size.
4610 if (rport_info
->max_frmsz
== 0) {
4611 bfa_trc(rport
->bfa
, rport
->rport_tag
);
4612 rport_info
->max_frmsz
= FC_MIN_PDUSZ
;
4615 rport
->rport_info
= *rport_info
;
4616 bfa_sm_send_event(rport
, BFA_RPORT_SM_ONLINE
);
4620 bfa_rport_offline(struct bfa_rport_s
*rport
)
4622 bfa_sm_send_event(rport
, BFA_RPORT_SM_OFFLINE
);
4626 bfa_rport_speed(struct bfa_rport_s
*rport
, enum bfa_port_speed speed
)
4628 bfa_assert(speed
!= 0);
4629 bfa_assert(speed
!= BFA_PORT_SPEED_AUTO
);
4631 rport
->rport_info
.speed
= speed
;
4632 bfa_sm_send_event(rport
, BFA_RPORT_SM_SET_SPEED
);
4636 bfa_rport_clear_stats(struct bfa_rport_s
*rport
)
4638 memset(&rport
->stats
, 0, sizeof(rport
->stats
));
4643 * SGPG related functions
4647 * Compute and return memory needed by FCP(im) module.
4650 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
4653 if (cfg
->drvcfg
.num_sgpgs
< BFA_SGPG_MIN
)
4654 cfg
->drvcfg
.num_sgpgs
= BFA_SGPG_MIN
;
4656 *km_len
+= (cfg
->drvcfg
.num_sgpgs
+ 1) * sizeof(struct bfa_sgpg_s
);
4657 *dm_len
+= (cfg
->drvcfg
.num_sgpgs
+ 1) * sizeof(struct bfi_sgpg_s
);
4662 bfa_sgpg_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4663 struct bfa_meminfo_s
*minfo
, struct bfa_pcidev_s
*pcidev
)
4665 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4667 struct bfa_sgpg_s
*hsgpg
;
4668 struct bfi_sgpg_s
*sgpg
;
4673 union bfi_addr_u addr
;
4674 } sgpg_pa
, sgpg_pa_tmp
;
4676 INIT_LIST_HEAD(&mod
->sgpg_q
);
4677 INIT_LIST_HEAD(&mod
->sgpg_wait_q
);
4679 bfa_trc(bfa
, cfg
->drvcfg
.num_sgpgs
);
4681 mod
->num_sgpgs
= cfg
->drvcfg
.num_sgpgs
;
4682 mod
->sgpg_arr_pa
= bfa_meminfo_dma_phys(minfo
);
4683 align_len
= (BFA_SGPG_ROUNDUP(mod
->sgpg_arr_pa
) - mod
->sgpg_arr_pa
);
4684 mod
->sgpg_arr_pa
+= align_len
;
4685 mod
->hsgpg_arr
= (struct bfa_sgpg_s
*) (bfa_meminfo_kva(minfo
) +
4687 mod
->sgpg_arr
= (struct bfi_sgpg_s
*) (bfa_meminfo_dma_virt(minfo
) +
4690 hsgpg
= mod
->hsgpg_arr
;
4691 sgpg
= mod
->sgpg_arr
;
4692 sgpg_pa
.pa
= mod
->sgpg_arr_pa
;
4693 mod
->free_sgpgs
= mod
->num_sgpgs
;
4695 bfa_assert(!(sgpg_pa
.pa
& (sizeof(struct bfi_sgpg_s
) - 1)));
4697 for (i
= 0; i
< mod
->num_sgpgs
; i
++) {
4698 memset(hsgpg
, 0, sizeof(*hsgpg
));
4699 memset(sgpg
, 0, sizeof(*sgpg
));
4702 sgpg_pa_tmp
.pa
= bfa_sgaddr_le(sgpg_pa
.pa
);
4703 hsgpg
->sgpg_pa
= sgpg_pa_tmp
.addr
;
4704 list_add_tail(&hsgpg
->qe
, &mod
->sgpg_q
);
4708 sgpg_pa
.pa
+= sizeof(struct bfi_sgpg_s
);
4711 bfa_meminfo_kva(minfo
) = (u8
*) hsgpg
;
4712 bfa_meminfo_dma_virt(minfo
) = (u8
*) sgpg
;
4713 bfa_meminfo_dma_phys(minfo
) = sgpg_pa
.pa
;
/* Module lifecycle hooks - nothing to do for the SGPG module. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4739 * hal_sgpg_public BFA SGPG public functions
4743 bfa_sgpg_malloc(struct bfa_s
*bfa
, struct list_head
*sgpg_q
, int nsgpgs
)
4745 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4746 struct bfa_sgpg_s
*hsgpg
;
4749 bfa_trc_fp(bfa
, nsgpgs
);
4751 if (mod
->free_sgpgs
< nsgpgs
)
4752 return BFA_STATUS_ENOMEM
;
4754 for (i
= 0; i
< nsgpgs
; i
++) {
4755 bfa_q_deq(&mod
->sgpg_q
, &hsgpg
);
4757 list_add_tail(&hsgpg
->qe
, sgpg_q
);
4760 mod
->free_sgpgs
-= nsgpgs
;
4761 return BFA_STATUS_OK
;
4765 bfa_sgpg_mfree(struct bfa_s
*bfa
, struct list_head
*sgpg_q
, int nsgpg
)
4767 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4768 struct bfa_sgpg_wqe_s
*wqe
;
4770 bfa_trc_fp(bfa
, nsgpg
);
4772 mod
->free_sgpgs
+= nsgpg
;
4773 bfa_assert(mod
->free_sgpgs
<= mod
->num_sgpgs
);
4775 list_splice_tail_init(sgpg_q
, &mod
->sgpg_q
);
4777 if (list_empty(&mod
->sgpg_wait_q
))
4781 * satisfy as many waiting requests as possible
4784 wqe
= bfa_q_first(&mod
->sgpg_wait_q
);
4785 if (mod
->free_sgpgs
< wqe
->nsgpg
)
4786 nsgpg
= mod
->free_sgpgs
;
4789 bfa_sgpg_malloc(bfa
, &wqe
->sgpg_q
, nsgpg
);
4790 wqe
->nsgpg
-= nsgpg
;
4791 if (wqe
->nsgpg
== 0) {
4793 wqe
->cbfn(wqe
->cbarg
);
4795 } while (mod
->free_sgpgs
&& !list_empty(&mod
->sgpg_wait_q
));
4799 bfa_sgpg_wait(struct bfa_s
*bfa
, struct bfa_sgpg_wqe_s
*wqe
, int nsgpg
)
4801 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4803 bfa_assert(nsgpg
> 0);
4804 bfa_assert(nsgpg
> mod
->free_sgpgs
);
4806 wqe
->nsgpg_total
= wqe
->nsgpg
= nsgpg
;
4809 * allocate any left to this one first
4811 if (mod
->free_sgpgs
) {
4813 * no one else is waiting for SGPG
4815 bfa_assert(list_empty(&mod
->sgpg_wait_q
));
4816 list_splice_tail_init(&mod
->sgpg_q
, &wqe
->sgpg_q
);
4817 wqe
->nsgpg
-= mod
->free_sgpgs
;
4818 mod
->free_sgpgs
= 0;
4821 list_add_tail(&wqe
->qe
, &mod
->sgpg_wait_q
);
4825 bfa_sgpg_wcancel(struct bfa_s
*bfa
, struct bfa_sgpg_wqe_s
*wqe
)
4827 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4829 bfa_assert(bfa_q_is_on_q(&mod
->sgpg_wait_q
, wqe
));
4832 if (wqe
->nsgpg_total
!= wqe
->nsgpg
)
4833 bfa_sgpg_mfree(bfa
, &wqe
->sgpg_q
,
4834 wqe
->nsgpg_total
- wqe
->nsgpg
);
4838 bfa_sgpg_winit(struct bfa_sgpg_wqe_s
*wqe
, void (*cbfn
) (void *cbarg
),
4841 INIT_LIST_HEAD(&wqe
->sgpg_q
);
4847 * UF related functions
4850 *****************************************************************************
4851 * Internal functions
4852 *****************************************************************************
4855 __bfa_cb_uf_recv(void *cbarg
, bfa_boolean_t complete
)
4857 struct bfa_uf_s
*uf
= cbarg
;
4858 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(uf
->bfa
);
4861 ufm
->ufrecv(ufm
->cbarg
, uf
);
4865 claim_uf_pbs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4869 ufm
->uf_pbs_kva
= (struct bfa_uf_buf_s
*) bfa_meminfo_dma_virt(mi
);
4870 ufm
->uf_pbs_pa
= bfa_meminfo_dma_phys(mi
);
4871 uf_pb_tot_sz
= BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s
) * ufm
->num_ufs
),
4874 bfa_meminfo_dma_virt(mi
) += uf_pb_tot_sz
;
4875 bfa_meminfo_dma_phys(mi
) += uf_pb_tot_sz
;
4877 memset((void *)ufm
->uf_pbs_kva
, 0, uf_pb_tot_sz
);
4881 claim_uf_post_msgs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4883 struct bfi_uf_buf_post_s
*uf_bp_msg
;
4884 struct bfi_sge_s
*sge
;
4885 union bfi_addr_u sga_zero
= { {0} };
4889 ufm
->uf_buf_posts
= (struct bfi_uf_buf_post_s
*) bfa_meminfo_kva(mi
);
4890 uf_bp_msg
= ufm
->uf_buf_posts
;
4892 for (i
= 0, uf_bp_msg
= ufm
->uf_buf_posts
; i
< ufm
->num_ufs
;
4894 memset(uf_bp_msg
, 0, sizeof(struct bfi_uf_buf_post_s
));
4896 uf_bp_msg
->buf_tag
= i
;
4897 buf_len
= sizeof(struct bfa_uf_buf_s
);
4898 uf_bp_msg
->buf_len
= cpu_to_be16(buf_len
);
4899 bfi_h2i_set(uf_bp_msg
->mh
, BFI_MC_UF
, BFI_UF_H2I_BUF_POST
,
4900 bfa_lpuid(ufm
->bfa
));
4902 sge
= uf_bp_msg
->sge
;
4903 sge
[0].sg_len
= buf_len
;
4904 sge
[0].flags
= BFI_SGE_DATA_LAST
;
4905 bfa_dma_addr_set(sge
[0].sga
, ufm_pbs_pa(ufm
, i
));
4908 sge
[1].sg_len
= buf_len
;
4909 sge
[1].flags
= BFI_SGE_PGDLEN
;
4910 sge
[1].sga
= sga_zero
;
4911 bfa_sge_to_be(&sge
[1]);
4915 * advance pointer beyond consumed memory
4917 bfa_meminfo_kva(mi
) = (u8
*) uf_bp_msg
;
4921 claim_ufs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4924 struct bfa_uf_s
*uf
;
4927 * Claim block of memory for UF list
4929 ufm
->uf_list
= (struct bfa_uf_s
*) bfa_meminfo_kva(mi
);
4932 * Initialize UFs and queue it in UF free queue
4934 for (i
= 0, uf
= ufm
->uf_list
; i
< ufm
->num_ufs
; i
++, uf
++) {
4935 memset(uf
, 0, sizeof(struct bfa_uf_s
));
4938 uf
->pb_len
= sizeof(struct bfa_uf_buf_s
);
4939 uf
->buf_kva
= (void *)&ufm
->uf_pbs_kva
[i
];
4940 uf
->buf_pa
= ufm_pbs_pa(ufm
, i
);
4941 list_add_tail(&uf
->qe
, &ufm
->uf_free_q
);
4945 * advance memory pointer
4947 bfa_meminfo_kva(mi
) = (u8
*) uf
;
/* Claim all UF module memory: posted buffers, descriptors, post messages. */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4959 bfa_uf_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
, u32
*dm_len
)
4961 u32 num_ufs
= cfg
->fwcfg
.num_uf_bufs
;
4964 * dma-able memory for UF posted bufs
4966 *dm_len
+= BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s
) * num_ufs
),
4970 * kernel Virtual memory for UFs and UF buf post msg copies
4972 *ndm_len
+= sizeof(struct bfa_uf_s
) * num_ufs
;
4973 *ndm_len
+= sizeof(struct bfi_uf_buf_post_s
) * num_ufs
;
4977 bfa_uf_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4978 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
4980 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
4982 memset(ufm
, 0, sizeof(struct bfa_uf_mod_s
));
4984 ufm
->num_ufs
= cfg
->fwcfg
.num_uf_bufs
;
4985 INIT_LIST_HEAD(&ufm
->uf_free_q
);
4986 INIT_LIST_HEAD(&ufm
->uf_posted_q
);
4988 uf_mem_claim(ufm
, meminfo
);
/* Module detach hook - nothing to do for the UF module. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4996 static struct bfa_uf_s
*
4997 bfa_uf_get(struct bfa_uf_mod_s
*uf_mod
)
4999 struct bfa_uf_s
*uf
;
5001 bfa_q_deq(&uf_mod
->uf_free_q
, &uf
);
5006 bfa_uf_put(struct bfa_uf_mod_s
*uf_mod
, struct bfa_uf_s
*uf
)
5008 list_add_tail(&uf
->qe
, &uf_mod
->uf_free_q
);
5012 bfa_uf_post(struct bfa_uf_mod_s
*ufm
, struct bfa_uf_s
*uf
)
5014 struct bfi_uf_buf_post_s
*uf_post_msg
;
5016 uf_post_msg
= bfa_reqq_next(ufm
->bfa
, BFA_REQQ_FCXP
);
5018 return BFA_STATUS_FAILED
;
5020 memcpy(uf_post_msg
, &ufm
->uf_buf_posts
[uf
->uf_tag
],
5021 sizeof(struct bfi_uf_buf_post_s
));
5022 bfa_reqq_produce(ufm
->bfa
, BFA_REQQ_FCXP
);
5024 bfa_trc(ufm
->bfa
, uf
->uf_tag
);
5026 list_add_tail(&uf
->qe
, &ufm
->uf_posted_q
);
5027 return BFA_STATUS_OK
;
5031 bfa_uf_post_all(struct bfa_uf_mod_s
*uf_mod
)
5033 struct bfa_uf_s
*uf
;
5035 while ((uf
= bfa_uf_get(uf_mod
)) != NULL
) {
5036 if (bfa_uf_post(uf_mod
, uf
) != BFA_STATUS_OK
)
5042 uf_recv(struct bfa_s
*bfa
, struct bfi_uf_frm_rcvd_s
*m
)
5044 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
5045 u16 uf_tag
= m
->buf_tag
;
5046 struct bfa_uf_buf_s
*uf_buf
= &ufm
->uf_pbs_kva
[uf_tag
];
5047 struct bfa_uf_s
*uf
= &ufm
->uf_list
[uf_tag
];
5048 u8
*buf
= &uf_buf
->d
[0];
5049 struct fchs_s
*fchs
;
5051 m
->frm_len
= be16_to_cpu(m
->frm_len
);
5052 m
->xfr_len
= be16_to_cpu(m
->xfr_len
);
5054 fchs
= (struct fchs_s
*)uf_buf
;
5056 list_del(&uf
->qe
); /* dequeue from posted queue */
5059 uf
->data_len
= m
->xfr_len
;
5061 bfa_assert(uf
->data_len
>= sizeof(struct fchs_s
));
5063 if (uf
->data_len
== sizeof(struct fchs_s
)) {
5064 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_UF
, BFA_PL_EID_RX
,
5065 uf
->data_len
, (struct fchs_s
*)buf
);
5067 u32 pld_w0
= *((u32
*) (buf
+ sizeof(struct fchs_s
)));
5068 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_UF
,
5069 BFA_PL_EID_RX
, uf
->data_len
,
5070 (struct fchs_s
*)buf
, pld_w0
);
5074 __bfa_cb_uf_recv(uf
, BFA_TRUE
);
5076 bfa_cb_queue(bfa
, &uf
->hcb_qe
, __bfa_cb_uf_recv
, uf
);
/* Module stop hook - nothing to do for the UF module. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5085 bfa_uf_iocdisable(struct bfa_s
*bfa
)
5087 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
5088 struct bfa_uf_s
*uf
;
5089 struct list_head
*qe
, *qen
;
5091 list_for_each_safe(qe
, qen
, &ufm
->uf_posted_q
) {
5092 uf
= (struct bfa_uf_s
*) qe
;
5094 bfa_uf_put(ufm
, uf
);
/* Module start hook: post all UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5111 * Register handler for all unsolicted recieve frames.
5113 * @param[in] bfa BFA instance
5114 * @param[in] ufrecv receive handler function
5115 * @param[in] cbarg receive handler arg
5118 bfa_uf_recv_register(struct bfa_s
*bfa
, bfa_cb_uf_recv_t ufrecv
, void *cbarg
)
5120 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
5122 ufm
->ufrecv
= ufrecv
;
5127 * Free an unsolicited frame back to BFA.
5129 * @param[in] uf unsolicited frame to be freed
5134 bfa_uf_free(struct bfa_uf_s
*uf
)
5136 bfa_uf_put(BFA_UF_MOD(uf
->bfa
), uf
);
5137 bfa_uf_post_all(BFA_UF_MOD(uf
->bfa
));
5143 * uf_pub BFA uf module public functions
5146 bfa_uf_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
5148 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
5150 switch (msg
->mhdr
.msg_id
) {
5151 case BFI_UF_I2H_FRM_RCVD
:
5152 uf_recv(bfa
, (struct bfi_uf_frm_rcvd_s
*) msg
);
5156 bfa_trc(bfa
, msg
->mhdr
.msg_id
);