/*
 * drivers/scsi/bfa/bfa_svc.c
 * From commit: "[SCSI] bfa: remove unused and empty functions"
 * (blob 3a2bee4d42ddef7c92753c1dbd524216887b63ff)
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
18 #include "bfa_os_inc.h"
19 #include "bfa_plog.h"
20 #include "bfa_cs.h"
21 #include "bfa_modules.h"
22 #include "bfad_drv.h"
24 BFA_TRC_FILE(HAL, FCXP);
25 BFA_MODULE(fcxp);
26 BFA_MODULE(sgpg);
27 BFA_MODULE(lps);
28 BFA_MODULE(fcport);
29 BFA_MODULE(rport);
30 BFA_MODULE(uf);
/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS      (1)
#define BFA_LPS_MAX_LPORTS      (256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
/*
 * Events handled by the LPS (logical port service) state machine.
 */
enum bfa_lps_event {
        BFA_LPS_SM_LOGIN = 1,           /* login request from user      */
        BFA_LPS_SM_LOGOUT = 2,          /* logout request from user     */
        BFA_LPS_SM_FWRSP = 3,           /* f/w response to login/logout */
        BFA_LPS_SM_RESUME = 4,          /* space present in reqq queue  */
        BFA_LPS_SM_DELETE = 5,          /* lps delete from user         */
        BFA_LPS_SM_OFFLINE = 6,         /* Link is offline              */
        BFA_LPS_SM_RX_CVL = 7,          /* Rx clear virtual link        */
};
/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if corresponding physical port or IOC are
 * disabled explicitly
 */
#define BFA_PORT_IS_DISABLED(bfa) \
        ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
        (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
        BFA_FCPORT_SM_START = 1,        /* start port state machine     */
        BFA_FCPORT_SM_STOP = 2,         /* stop port state machine      */
        BFA_FCPORT_SM_ENABLE = 3,       /* enable port                  */
        BFA_FCPORT_SM_DISABLE = 4,      /* disable port state machine   */
        BFA_FCPORT_SM_FWRSP = 5,        /* firmware enable/disable rsp  */
        BFA_FCPORT_SM_LINKUP = 6,       /* firmware linkup event        */
        BFA_FCPORT_SM_LINKDOWN = 7,     /* firmware linkup down         */
        BFA_FCPORT_SM_QRESUME = 8,      /* CQ space available           */
        BFA_FCPORT_SM_HWFAIL = 9,       /* IOC h/w failure              */
};
/*
 * BFA port link notification state machine events
 */
enum bfa_fcport_ln_sm_event {
        BFA_FCPORT_LN_SM_LINKUP = 1,            /* linkup event         */
        BFA_FCPORT_LN_SM_LINKDOWN = 2,          /* linkdown event       */
        BFA_FCPORT_LN_SM_NOTIFICATION = 3       /* done notification    */
};
/*
 * RPORT related definitions
 */
/*
 * Deliver the rport offline callback: directly for FCS-owned rports,
 * otherwise deferred through the callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {                                 \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_offline((__rp)->rport_drv);                \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                        __bfa_cb_rport_offline, (__rp));                \
        }                                                               \
} while (0)

/*
 * Deliver the rport online callback: directly for FCS-owned rports,
 * otherwise deferred through the callback queue.
 */
#define bfa_rport_online_cb(__rp) do {                                  \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_online((__rp)->rport_drv);                 \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                        __bfa_cb_rport_online, (__rp));                 \
        }                                                               \
} while (0)
/*
 * Events handled by the rport state machine.
 */
enum bfa_rport_event {
        BFA_RPORT_SM_CREATE = 1,        /* rport create event           */
        BFA_RPORT_SM_DELETE = 2,        /* deleting an existing rport   */
        BFA_RPORT_SM_ONLINE = 3,        /* rport is online              */
        BFA_RPORT_SM_OFFLINE = 4,       /* rport is offline             */
        BFA_RPORT_SM_FWRSP = 5,         /* firmware response            */
        BFA_RPORT_SM_HWFAIL = 6,        /* IOC h/w failure              */
        BFA_RPORT_SM_QOS_SCN = 7,       /* QoS SCN from firmware        */
        BFA_RPORT_SM_SET_SPEED = 8,     /* Set Rport Speed              */
        BFA_RPORT_SM_QRESUME = 9,       /* space in requeue queue       */
};
130 * forward declarations FCXP related functions
132 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
133 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
134 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
135 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
136 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
137 static void bfa_fcxp_qresume(void *cbarg);
138 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req);
142 * forward declarations for LPS functions
144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
145 u32 *dm_len);
146 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
147 struct bfa_iocfc_cfg_s *cfg,
148 struct bfa_meminfo_s *meminfo,
149 struct bfa_pcidev_s *pcidev);
150 static void bfa_lps_detach(struct bfa_s *bfa);
151 static void bfa_lps_start(struct bfa_s *bfa);
152 static void bfa_lps_stop(struct bfa_s *bfa);
153 static void bfa_lps_iocdisable(struct bfa_s *bfa);
154 static void bfa_lps_login_rsp(struct bfa_s *bfa,
155 struct bfi_lps_login_rsp_s *rsp);
156 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
157 struct bfi_lps_logout_rsp_s *rsp);
158 static void bfa_lps_reqq_resume(void *lps_arg);
159 static void bfa_lps_free(struct bfa_lps_s *lps);
160 static void bfa_lps_send_login(struct bfa_lps_s *lps);
161 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
162 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
167 * forward declaration for LPS state machine
169 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
170 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
172 event);
173 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
174 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event);
179 * forward declaration for FC Port functions
181 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
182 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
183 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
184 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
185 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
186 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
187 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
188 enum bfa_port_linkstate event, bfa_boolean_t trunk);
189 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
190 enum bfa_port_linkstate event);
191 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
192 static void bfa_fcport_stats_get_timeout(void *cbarg);
193 static void bfa_fcport_stats_clr_timeout(void *cbarg);
194 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
197 * forward declaration for FC PORT state machine
199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
200 enum bfa_fcport_sm_event event);
201 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
202 enum bfa_fcport_sm_event event);
203 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
207 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
208 enum bfa_fcport_sm_event event);
209 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
210 enum bfa_fcport_sm_event event);
211 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
212 enum bfa_fcport_sm_event event);
213 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
214 enum bfa_fcport_sm_event event);
215 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
216 enum bfa_fcport_sm_event event);
217 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
218 enum bfa_fcport_sm_event event);
219 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
220 enum bfa_fcport_sm_event event);
221 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
222 enum bfa_fcport_sm_event event);
224 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
225 enum bfa_fcport_ln_sm_event event);
226 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
227 enum bfa_fcport_ln_sm_event event);
228 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
229 enum bfa_fcport_ln_sm_event event);
230 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
231 enum bfa_fcport_ln_sm_event event);
232 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
233 enum bfa_fcport_ln_sm_event event);
234 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
235 enum bfa_fcport_ln_sm_event event);
236 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
237 enum bfa_fcport_ln_sm_event event);
239 static struct bfa_sm_table_s hal_port_sm_table[] = {
240 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
241 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
242 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
243 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
244 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
245 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
246 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
247 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
248 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
249 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
250 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
251 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
256 * forward declaration for RPORT related functions
258 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
259 static void bfa_rport_free(struct bfa_rport_s *rport);
260 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
261 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
262 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
263 static void __bfa_cb_rport_online(void *cbarg,
264 bfa_boolean_t complete);
265 static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete);
269 * forward declaration for RPORT state machine
271 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
280 enum bfa_rport_event event);
281 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
282 enum bfa_rport_event event);
283 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
284 enum bfa_rport_event event);
285 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
286 enum bfa_rport_event event);
287 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
288 enum bfa_rport_event event);
289 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
290 enum bfa_rport_event event);
291 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
292 enum bfa_rport_event event);
293 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
294 enum bfa_rport_event event);
295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event);
299 * PLOG related definitions
301 static int
302 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
304 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
305 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
306 return 1;
308 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
309 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
310 return 1;
312 return 0;
315 static void
316 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
318 u16 tail;
319 struct bfa_plog_rec_s *pl_recp;
321 if (plog->plog_enabled == 0)
322 return;
324 if (plkd_validate_logrec(pl_rec)) {
325 bfa_assert(0);
326 return;
329 tail = plog->tail;
331 pl_recp = &(plog->plog_recs[tail]);
333 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
335 pl_recp->tv = bfa_os_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog->tail);
338 if (plog->head == plog->tail)
339 BFA_PL_LOG_REC_INCR(plog->head);
342 void
343 bfa_plog_init(struct bfa_plog_s *plog)
345 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
347 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348 plog->head = plog->tail = 0;
349 plog->plog_enabled = 1;
352 void
353 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
354 enum bfa_plog_eid event,
355 u16 misc, char *log_str)
357 struct bfa_plog_rec_s lp;
359 if (plog->plog_enabled) {
360 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361 lp.mid = mid;
362 lp.eid = event;
363 lp.log_type = BFA_PL_LOG_TYPE_STRING;
364 lp.misc = misc;
365 strncpy(lp.log_entry.string_log, log_str,
366 BFA_PL_STRING_LOG_SZ - 1);
367 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
368 bfa_plog_add(plog, &lp);
372 void
373 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
374 enum bfa_plog_eid event,
375 u16 misc, u32 *intarr, u32 num_ints)
377 struct bfa_plog_rec_s lp;
378 u32 i;
380 if (num_ints > BFA_PL_INT_LOG_SZ)
381 num_ints = BFA_PL_INT_LOG_SZ;
383 if (plog->plog_enabled) {
384 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
385 lp.mid = mid;
386 lp.eid = event;
387 lp.log_type = BFA_PL_LOG_TYPE_INT;
388 lp.misc = misc;
390 for (i = 0; i < num_ints; i++)
391 lp.log_entry.int_log[i] = intarr[i];
393 lp.log_num_ints = (u8) num_ints;
395 bfa_plog_add(plog, &lp);
399 void
400 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
401 enum bfa_plog_eid event,
402 u16 misc, struct fchs_s *fchdr)
404 struct bfa_plog_rec_s lp;
405 u32 *tmp_int = (u32 *) fchdr;
406 u32 ints[BFA_PL_INT_LOG_SZ];
408 if (plog->plog_enabled) {
409 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
411 ints[0] = tmp_int[0];
412 ints[1] = tmp_int[1];
413 ints[2] = tmp_int[4];
415 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
419 void
420 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
421 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
422 u32 pld_w0)
424 struct bfa_plog_rec_s lp;
425 u32 *tmp_int = (u32 *) fchdr;
426 u32 ints[BFA_PL_INT_LOG_SZ];
428 if (plog->plog_enabled) {
429 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
431 ints[0] = tmp_int[0];
432 ints[1] = tmp_int[1];
433 ints[2] = tmp_int[4];
434 ints[3] = pld_w0;
436 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
441 bfa_boolean_t
442 bfa_plog_get_setting(struct bfa_plog_s *plog)
444 return (bfa_boolean_t)plog->plog_enabled;
448 * fcxp_pvt BFA FCXP private functions
451 static void
452 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
454 u8 *dm_kva = NULL;
455 u64 dm_pa;
456 u32 buf_pool_sz;
458 dm_kva = bfa_meminfo_dma_virt(mi);
459 dm_pa = bfa_meminfo_dma_phys(mi);
461 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
464 * Initialize the fcxp req payload list
466 mod->req_pld_list_kva = dm_kva;
467 mod->req_pld_list_pa = dm_pa;
468 dm_kva += buf_pool_sz;
469 dm_pa += buf_pool_sz;
470 memset(mod->req_pld_list_kva, 0, buf_pool_sz);
473 * Initialize the fcxp rsp payload list
475 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
476 mod->rsp_pld_list_kva = dm_kva;
477 mod->rsp_pld_list_pa = dm_pa;
478 dm_kva += buf_pool_sz;
479 dm_pa += buf_pool_sz;
480 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
482 bfa_meminfo_dma_virt(mi) = dm_kva;
483 bfa_meminfo_dma_phys(mi) = dm_pa;
486 static void
487 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
489 u16 i;
490 struct bfa_fcxp_s *fcxp;
492 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
493 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
495 INIT_LIST_HEAD(&mod->fcxp_free_q);
496 INIT_LIST_HEAD(&mod->fcxp_active_q);
498 mod->fcxp_list = fcxp;
500 for (i = 0; i < mod->num_fcxps; i++) {
501 fcxp->fcxp_mod = mod;
502 fcxp->fcxp_tag = i;
504 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
505 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
506 fcxp->reqq_waiting = BFA_FALSE;
508 fcxp = fcxp + 1;
511 bfa_meminfo_kva(mi) = (void *)fcxp;
514 static void
515 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
516 u32 *dm_len)
518 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
520 if (num_fcxp_reqs == 0)
521 return;
524 * Account for req/rsp payload
526 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
527 if (cfg->drvcfg.min_cfg)
528 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
529 else
530 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
533 * Account for fcxp structs
535 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
538 static void
539 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
540 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
542 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
544 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
545 mod->bfa = bfa;
546 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
549 * Initialize FCXP request and response payload sizes.
551 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
552 if (!cfg->drvcfg.min_cfg)
553 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
555 INIT_LIST_HEAD(&mod->wait_q);
557 claim_fcxp_req_rsp_mem(mod, meminfo);
558 claim_fcxps_mem(mod, meminfo);
/*
 * Module detach hook: the FCXP module has nothing to tear down.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

/*
 * Module start hook: no per-start work needed.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

/*
 * Module stop hook: no per-stop work needed.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
576 static void
577 bfa_fcxp_iocdisable(struct bfa_s *bfa)
579 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
580 struct bfa_fcxp_s *fcxp;
581 struct list_head *qe, *qen;
583 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
584 fcxp = (struct bfa_fcxp_s *) qe;
585 if (fcxp->caller == NULL) {
586 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
587 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
588 bfa_fcxp_free(fcxp);
589 } else {
590 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
591 bfa_cb_queue(bfa, &fcxp->hcb_qe,
592 __bfa_fcxp_send_cbfn, fcxp);
597 static struct bfa_fcxp_s *
598 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
600 struct bfa_fcxp_s *fcxp;
602 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
604 if (fcxp)
605 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
607 return fcxp;
610 static void
611 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
612 struct bfa_s *bfa,
613 u8 *use_ibuf,
614 u32 *nr_sgles,
615 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
616 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
617 struct list_head *r_sgpg_q,
618 int n_sgles,
619 bfa_fcxp_get_sgaddr_t sga_cbfn,
620 bfa_fcxp_get_sglen_t sglen_cbfn)
623 bfa_assert(bfa != NULL);
625 bfa_trc(bfa, fcxp->fcxp_tag);
627 if (n_sgles == 0) {
628 *use_ibuf = 1;
629 } else {
630 bfa_assert(*sga_cbfn != NULL);
631 bfa_assert(*sglen_cbfn != NULL);
633 *use_ibuf = 0;
634 *r_sga_cbfn = sga_cbfn;
635 *r_sglen_cbfn = sglen_cbfn;
637 *nr_sgles = n_sgles;
640 * alloc required sgpgs
642 if (n_sgles > BFI_SGE_INLINE)
643 bfa_assert(0);
648 static void
649 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
650 void *caller, struct bfa_s *bfa, int nreq_sgles,
651 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
652 bfa_fcxp_get_sglen_t req_sglen_cbfn,
653 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
654 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
657 bfa_assert(bfa != NULL);
659 bfa_trc(bfa, fcxp->fcxp_tag);
661 fcxp->caller = caller;
663 bfa_fcxp_init_reqrsp(fcxp, bfa,
664 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
665 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
666 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
668 bfa_fcxp_init_reqrsp(fcxp, bfa,
669 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
670 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
671 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
675 static void
676 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
678 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
679 struct bfa_fcxp_wqe_s *wqe;
681 bfa_q_deq(&mod->wait_q, &wqe);
682 if (wqe) {
683 bfa_trc(mod->bfa, fcxp->fcxp_tag);
685 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
686 wqe->nrsp_sgles, wqe->req_sga_cbfn,
687 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
688 wqe->rsp_sglen_cbfn);
690 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
691 return;
694 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
695 list_del(&fcxp->qe);
696 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
699 static void
700 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
701 bfa_status_t req_status, u32 rsp_len,
702 u32 resid_len, struct fchs_s *rsp_fchs)
704 /* discarded fcxp completion */
707 static void
708 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
710 struct bfa_fcxp_s *fcxp = cbarg;
712 if (complete) {
713 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
714 fcxp->rsp_status, fcxp->rsp_len,
715 fcxp->residue_len, &fcxp->rsp_fchs);
716 } else {
717 bfa_fcxp_free(fcxp);
721 static void
722 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
724 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
725 struct bfa_fcxp_s *fcxp;
726 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
728 bfa_trc(bfa, fcxp_tag);
730 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
733 * @todo f/w should not set residue to non-0 when everything
734 * is received.
736 if (fcxp_rsp->req_status == BFA_STATUS_OK)
737 fcxp_rsp->residue_len = 0;
738 else
739 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
741 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
743 bfa_assert(fcxp->send_cbfn != NULL);
745 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
747 if (fcxp->send_cbfn != NULL) {
748 bfa_trc(mod->bfa, (NULL == fcxp->caller));
749 if (fcxp->caller == NULL) {
750 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
751 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
752 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
754 * fcxp automatically freed on return from the callback
756 bfa_fcxp_free(fcxp);
757 } else {
758 fcxp->rsp_status = fcxp_rsp->req_status;
759 fcxp->rsp_len = fcxp_rsp->rsp_len;
760 fcxp->residue_len = fcxp_rsp->residue_len;
761 fcxp->rsp_fchs = fcxp_rsp->fchs;
763 bfa_cb_queue(bfa, &fcxp->hcb_qe,
764 __bfa_fcxp_send_cbfn, fcxp);
766 } else {
767 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
771 static void
772 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
774 union bfi_addr_u sga_zero = { {0} };
776 sge->sg_len = reqlen;
777 sge->flags = BFI_SGE_DATA_LAST;
778 bfa_dma_addr_set(sge[0].sga, req_pa);
779 bfa_sge_to_be(sge);
780 sge++;
782 sge->sga = sga_zero;
783 sge->sg_len = reqlen;
784 sge->flags = BFI_SGE_PGDLEN;
785 bfa_sge_to_be(sge);
788 static void
789 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
790 struct fchs_s *fchs)
793 * TODO: TX ox_id
795 if (reqlen > 0) {
796 if (fcxp->use_ireqbuf) {
797 u32 pld_w0 =
798 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
800 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
801 BFA_PL_EID_TX,
802 reqlen + sizeof(struct fchs_s), fchs,
803 pld_w0);
804 } else {
805 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
806 BFA_PL_EID_TX,
807 reqlen + sizeof(struct fchs_s),
808 fchs);
810 } else {
811 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
812 reqlen + sizeof(struct fchs_s), fchs);
816 static void
817 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
818 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
820 if (fcxp_rsp->rsp_len > 0) {
821 if (fcxp->use_irspbuf) {
822 u32 pld_w0 =
823 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
825 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
826 BFA_PL_EID_RX,
827 (u16) fcxp_rsp->rsp_len,
828 &fcxp_rsp->fchs, pld_w0);
829 } else {
830 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
831 BFA_PL_EID_RX,
832 (u16) fcxp_rsp->rsp_len,
833 &fcxp_rsp->fchs);
835 } else {
836 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
837 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
842 * Handler to resume sending fcxp when space in available in cpe queue.
844 static void
845 bfa_fcxp_qresume(void *cbarg)
847 struct bfa_fcxp_s *fcxp = cbarg;
848 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
849 struct bfi_fcxp_send_req_s *send_req;
851 fcxp->reqq_waiting = BFA_FALSE;
852 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
853 bfa_fcxp_queue(fcxp, send_req);
857 * Queue fcxp send request to foimrware.
859 static void
860 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
862 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
863 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
864 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
865 struct bfa_rport_s *rport = reqi->bfa_rport;
867 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
868 bfa_lpuid(bfa));
870 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
871 if (rport) {
872 send_req->rport_fw_hndl = rport->fw_handle;
873 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
874 if (send_req->max_frmsz == 0)
875 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
876 } else {
877 send_req->rport_fw_hndl = 0;
878 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
881 send_req->vf_id = cpu_to_be16(reqi->vf_id);
882 send_req->lp_tag = reqi->lp_tag;
883 send_req->class = reqi->class;
884 send_req->rsp_timeout = rspi->rsp_timeout;
885 send_req->cts = reqi->cts;
886 send_req->fchs = reqi->fchs;
888 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
889 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
892 * setup req sgles
894 if (fcxp->use_ireqbuf == 1) {
895 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
896 BFA_FCXP_REQ_PLD_PA(fcxp));
897 } else {
898 if (fcxp->nreq_sgles > 0) {
899 bfa_assert(fcxp->nreq_sgles == 1);
900 hal_fcxp_set_local_sges(send_req->req_sge,
901 reqi->req_tot_len,
902 fcxp->req_sga_cbfn(fcxp->caller,
903 0));
904 } else {
905 bfa_assert(reqi->req_tot_len == 0);
906 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
911 * setup rsp sgles
913 if (fcxp->use_irspbuf == 1) {
914 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
916 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
917 BFA_FCXP_RSP_PLD_PA(fcxp));
919 } else {
920 if (fcxp->nrsp_sgles > 0) {
921 bfa_assert(fcxp->nrsp_sgles == 1);
922 hal_fcxp_set_local_sges(send_req->rsp_sge,
923 rspi->rsp_maxlen,
924 fcxp->rsp_sga_cbfn(fcxp->caller,
925 0));
926 } else {
927 bfa_assert(rspi->rsp_maxlen == 0);
928 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
932 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
934 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
936 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
937 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
941 * hal_fcxp_api BFA FCXP API
945 * Allocate an FCXP instance to send a response or to send a request
946 * that has a response. Request/response buffers are allocated by caller.
948 * @param[in] bfa BFA bfa instance
949 * @param[in] nreq_sgles Number of SG elements required for request
950 * buffer. 0, if fcxp internal buffers are used.
951 * Use bfa_fcxp_get_reqbuf() to get the
952 * internal req buffer.
953 * @param[in] req_sgles SG elements describing request buffer. Will be
954 * copied in by BFA and hence can be freed on
955 * return from this function.
956 * @param[in] get_req_sga function ptr to be called to get a request SG
957 * Address (given the sge index).
958 * @param[in] get_req_sglen function ptr to be called to get a request SG
959 * len (given the sge index).
960 * @param[in] get_rsp_sga function ptr to be called to get a response SG
961 * Address (given the sge index).
962 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
963 * len (given the sge index).
965 * @return FCXP instance. NULL on failure.
967 struct bfa_fcxp_s *
968 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
969 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
970 bfa_fcxp_get_sglen_t req_sglen_cbfn,
971 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
972 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
974 struct bfa_fcxp_s *fcxp = NULL;
976 bfa_assert(bfa != NULL);
978 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
979 if (fcxp == NULL)
980 return NULL;
982 bfa_trc(bfa, fcxp->fcxp_tag);
984 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
985 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
987 return fcxp;
991 * Get the internal request buffer pointer
993 * @param[in] fcxp BFA fcxp pointer
995 * @return pointer to the internal request buffer
997 void *
998 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1000 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1001 void *reqbuf;
1003 bfa_assert(fcxp->use_ireqbuf == 1);
1004 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1005 fcxp->fcxp_tag * mod->req_pld_sz;
1006 return reqbuf;
1010 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1012 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1014 return mod->req_pld_sz;
1018 * Get the internal response buffer pointer
1020 * @param[in] fcxp BFA fcxp pointer
1022 * @return pointer to the internal request buffer
1024 void *
1025 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1027 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1028 void *rspbuf;
1030 bfa_assert(fcxp->use_irspbuf == 1);
1032 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1033 fcxp->fcxp_tag * mod->rsp_pld_sz;
1034 return rspbuf;
1038 * Free the BFA FCXP
1040 * @param[in] fcxp BFA fcxp pointer
1042 * @return void
1044 void
1045 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1047 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1049 bfa_assert(fcxp != NULL);
1050 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1051 bfa_fcxp_put(fcxp);
1055 * Send a FCXP request
1057 * @param[in] fcxp BFA fcxp pointer
1058 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1059 * @param[in] vf_id virtual Fabric ID
1060 * @param[in] lp_tag lport tag
1061 * @param[in] cts use Continous sequence
1062 * @param[in] cos fc Class of Service
1063 * @param[in] reqlen request length, does not include FCHS length
1064 * @param[in] fchs fc Header Pointer. The header content will be copied
1065 * in by BFA.
1067 * @param[in] cbfn call back function to be called on receiving
1068 * the response
1069 * @param[in] cbarg arg for cbfn
1070 * @param[in] rsp_timeout
1071 * response timeout
1073 * @return bfa_status_t
1075 void
1076 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1077 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1078 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1079 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1081 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1082 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1083 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1084 struct bfi_fcxp_send_req_s *send_req;
1086 bfa_trc(bfa, fcxp->fcxp_tag);
1089 * setup request/response info
1091 reqi->bfa_rport = rport;
1092 reqi->vf_id = vf_id;
1093 reqi->lp_tag = lp_tag;
1094 reqi->class = cos;
1095 rspi->rsp_timeout = rsp_timeout;
1096 reqi->cts = cts;
1097 reqi->fchs = *fchs;
1098 reqi->req_tot_len = reqlen;
1099 rspi->rsp_maxlen = rsp_maxlen;
1100 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1101 fcxp->send_cbarg = cbarg;
1104 * If no room in CPE queue, wait for space in request queue
1106 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1107 if (!send_req) {
1108 bfa_trc(bfa, fcxp->fcxp_tag);
1109 fcxp->reqq_waiting = BFA_TRUE;
1110 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1111 return;
1114 bfa_fcxp_queue(fcxp, send_req);
1118 * Abort a BFA FCXP
1120 * @param[in] fcxp BFA fcxp pointer
1122 * @return void
1124 bfa_status_t
1125 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1127 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1128 bfa_assert(0);
1129 return BFA_STATUS_OK;
1132 void
1133 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1134 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1135 void *caller, int nreq_sgles,
1136 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1137 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1138 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1139 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1141 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1143 bfa_assert(list_empty(&mod->fcxp_free_q));
1145 wqe->alloc_cbfn = alloc_cbfn;
1146 wqe->alloc_cbarg = alloc_cbarg;
1147 wqe->caller = caller;
1148 wqe->bfa = bfa;
1149 wqe->nreq_sgles = nreq_sgles;
1150 wqe->nrsp_sgles = nrsp_sgles;
1151 wqe->req_sga_cbfn = req_sga_cbfn;
1152 wqe->req_sglen_cbfn = req_sglen_cbfn;
1153 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1154 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1156 list_add_tail(&wqe->qe, &mod->wait_q);
1159 void
1160 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1162 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1164 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
1165 list_del(&wqe->qe);
1168 void
1169 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1172 * If waiting for room in request queue, cancel reqq wait
1173 * and free fcxp.
1175 if (fcxp->reqq_waiting) {
1176 fcxp->reqq_waiting = BFA_FALSE;
1177 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1178 bfa_fcxp_free(fcxp);
1179 return;
1182 fcxp->send_cbfn = bfa_fcxp_null_comp;
1188 * hal_fcxp_public BFA FCXP public functions
1191 void
1192 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1194 switch (msg->mhdr.msg_id) {
1195 case BFI_FCXP_I2H_SEND_RSP:
1196 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1197 break;
1199 default:
1200 bfa_trc(bfa, msg->mhdr.msg_id);
1201 bfa_assert(0);
1206 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1208 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1210 return mod->rsp_pld_sz;
1215 * BFA LPS state machine functions
1219 * Init state -- no login
1221 static void
1222 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1224 bfa_trc(lps->bfa, lps->lp_tag);
1225 bfa_trc(lps->bfa, event);
1227 switch (event) {
1228 case BFA_LPS_SM_LOGIN:
1229 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1230 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1231 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1232 } else {
1233 bfa_sm_set_state(lps, bfa_lps_sm_login);
1234 bfa_lps_send_login(lps);
1237 if (lps->fdisc)
1238 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1239 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1240 else
1241 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1242 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1243 break;
1245 case BFA_LPS_SM_LOGOUT:
1246 bfa_lps_logout_comp(lps);
1247 break;
1249 case BFA_LPS_SM_DELETE:
1250 bfa_lps_free(lps);
1251 break;
1253 case BFA_LPS_SM_RX_CVL:
1254 case BFA_LPS_SM_OFFLINE:
1255 break;
1257 case BFA_LPS_SM_FWRSP:
1259 * Could happen when fabric detects loopback and discards
1260 * the lps request. Fw will eventually sent out the timeout
1261 * Just ignore
1263 break;
1265 default:
1266 bfa_sm_fault(lps->bfa, event);
1271 * login is in progress -- awaiting response from firmware
1273 static void
1274 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1276 bfa_trc(lps->bfa, lps->lp_tag);
1277 bfa_trc(lps->bfa, event);
1279 switch (event) {
1280 case BFA_LPS_SM_FWRSP:
1281 if (lps->status == BFA_STATUS_OK) {
1282 bfa_sm_set_state(lps, bfa_lps_sm_online);
1283 if (lps->fdisc)
1284 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1285 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1286 else
1287 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1288 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1289 } else {
1290 bfa_sm_set_state(lps, bfa_lps_sm_init);
1291 if (lps->fdisc)
1292 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1293 BFA_PL_EID_LOGIN, 0,
1294 "FDISC Fail (RJT or timeout)");
1295 else
1296 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1297 BFA_PL_EID_LOGIN, 0,
1298 "FLOGI Fail (RJT or timeout)");
1300 bfa_lps_login_comp(lps);
1301 break;
1303 case BFA_LPS_SM_OFFLINE:
1304 bfa_sm_set_state(lps, bfa_lps_sm_init);
1305 break;
1307 default:
1308 bfa_sm_fault(lps->bfa, event);
1313 * login pending - awaiting space in request queue
1315 static void
1316 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1318 bfa_trc(lps->bfa, lps->lp_tag);
1319 bfa_trc(lps->bfa, event);
1321 switch (event) {
1322 case BFA_LPS_SM_RESUME:
1323 bfa_sm_set_state(lps, bfa_lps_sm_login);
1324 break;
1326 case BFA_LPS_SM_OFFLINE:
1327 bfa_sm_set_state(lps, bfa_lps_sm_init);
1328 bfa_reqq_wcancel(&lps->wqe);
1329 break;
1331 case BFA_LPS_SM_RX_CVL:
1333 * Login was not even sent out; so when getting out
1334 * of this state, it will appear like a login retry
1335 * after Clear virtual link
1337 break;
1339 default:
1340 bfa_sm_fault(lps->bfa, event);
1345 * login complete
1347 static void
1348 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1350 bfa_trc(lps->bfa, lps->lp_tag);
1351 bfa_trc(lps->bfa, event);
1353 switch (event) {
1354 case BFA_LPS_SM_LOGOUT:
1355 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1356 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1357 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1358 } else {
1359 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1360 bfa_lps_send_logout(lps);
1362 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1363 BFA_PL_EID_LOGO, 0, "Logout");
1364 break;
1366 case BFA_LPS_SM_RX_CVL:
1367 bfa_sm_set_state(lps, bfa_lps_sm_init);
1369 /* Let the vport module know about this event */
1370 bfa_lps_cvl_event(lps);
1371 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1372 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1373 break;
1375 case BFA_LPS_SM_OFFLINE:
1376 case BFA_LPS_SM_DELETE:
1377 bfa_sm_set_state(lps, bfa_lps_sm_init);
1378 break;
1380 default:
1381 bfa_sm_fault(lps->bfa, event);
1386 * logout in progress - awaiting firmware response
1388 static void
1389 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1391 bfa_trc(lps->bfa, lps->lp_tag);
1392 bfa_trc(lps->bfa, event);
1394 switch (event) {
1395 case BFA_LPS_SM_FWRSP:
1396 bfa_sm_set_state(lps, bfa_lps_sm_init);
1397 bfa_lps_logout_comp(lps);
1398 break;
1400 case BFA_LPS_SM_OFFLINE:
1401 bfa_sm_set_state(lps, bfa_lps_sm_init);
1402 break;
1404 default:
1405 bfa_sm_fault(lps->bfa, event);
1410 * logout pending -- awaiting space in request queue
1412 static void
1413 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1415 bfa_trc(lps->bfa, lps->lp_tag);
1416 bfa_trc(lps->bfa, event);
1418 switch (event) {
1419 case BFA_LPS_SM_RESUME:
1420 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1421 bfa_lps_send_logout(lps);
1422 break;
1424 case BFA_LPS_SM_OFFLINE:
1425 bfa_sm_set_state(lps, bfa_lps_sm_init);
1426 bfa_reqq_wcancel(&lps->wqe);
1427 break;
1429 default:
1430 bfa_sm_fault(lps->bfa, event);
1437 * lps_pvt BFA LPS private functions
1441 * return memory requirement
1443 static void
1444 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1445 u32 *dm_len)
1447 if (cfg->drvcfg.min_cfg)
1448 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1449 else
1450 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1454 * bfa module attach at initialization time
1456 static void
1457 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1458 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1460 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1461 struct bfa_lps_s *lps;
1462 int i;
1464 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1465 mod->num_lps = BFA_LPS_MAX_LPORTS;
1466 if (cfg->drvcfg.min_cfg)
1467 mod->num_lps = BFA_LPS_MIN_LPORTS;
1468 else
1469 mod->num_lps = BFA_LPS_MAX_LPORTS;
1470 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1472 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1474 INIT_LIST_HEAD(&mod->lps_free_q);
1475 INIT_LIST_HEAD(&mod->lps_active_q);
1477 for (i = 0; i < mod->num_lps; i++, lps++) {
1478 lps->bfa = bfa;
1479 lps->lp_tag = (u8) i;
1480 lps->reqq = BFA_REQQ_LPS;
1481 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1482 list_add_tail(&lps->qe, &mod->lps_free_q);
1486 static void
1487 bfa_lps_detach(struct bfa_s *bfa)
1491 static void
1492 bfa_lps_start(struct bfa_s *bfa)
1496 static void
1497 bfa_lps_stop(struct bfa_s *bfa)
1502 * IOC in disabled state -- consider all lps offline
1504 static void
1505 bfa_lps_iocdisable(struct bfa_s *bfa)
1507 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1508 struct bfa_lps_s *lps;
1509 struct list_head *qe, *qen;
1511 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1512 lps = (struct bfa_lps_s *) qe;
1513 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1518 * Firmware login response
1520 static void
1521 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1523 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1524 struct bfa_lps_s *lps;
1526 bfa_assert(rsp->lp_tag < mod->num_lps);
1527 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1529 lps->status = rsp->status;
1530 switch (rsp->status) {
1531 case BFA_STATUS_OK:
1532 lps->fport = rsp->f_port;
1533 lps->npiv_en = rsp->npiv_en;
1534 lps->lp_pid = rsp->lp_pid;
1535 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1536 lps->pr_pwwn = rsp->port_name;
1537 lps->pr_nwwn = rsp->node_name;
1538 lps->auth_req = rsp->auth_req;
1539 lps->lp_mac = rsp->lp_mac;
1540 lps->brcd_switch = rsp->brcd_switch;
1541 lps->fcf_mac = rsp->fcf_mac;
1543 break;
1545 case BFA_STATUS_FABRIC_RJT:
1546 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1547 lps->lsrjt_expl = rsp->lsrjt_expl;
1549 break;
1551 case BFA_STATUS_EPROTOCOL:
1552 lps->ext_status = rsp->ext_status;
1554 break;
1556 default:
1557 /* Nothing to do with other status */
1558 break;
1561 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1565 * Firmware logout response
1567 static void
1568 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1570 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1571 struct bfa_lps_s *lps;
1573 bfa_assert(rsp->lp_tag < mod->num_lps);
1574 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1576 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1580 * Firmware received a Clear virtual link request (for FCoE)
1582 static void
1583 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1585 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1586 struct bfa_lps_s *lps;
1588 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1590 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1594 * Space is available in request queue, resume queueing request to firmware.
1596 static void
1597 bfa_lps_reqq_resume(void *lps_arg)
1599 struct bfa_lps_s *lps = lps_arg;
1601 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1605 * lps is freed -- triggered by vport delete
1607 static void
1608 bfa_lps_free(struct bfa_lps_s *lps)
1610 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1612 lps->lp_pid = 0;
1613 list_del(&lps->qe);
1614 list_add_tail(&lps->qe, &mod->lps_free_q);
1618 * send login request to firmware
1620 static void
1621 bfa_lps_send_login(struct bfa_lps_s *lps)
1623 struct bfi_lps_login_req_s *m;
1625 m = bfa_reqq_next(lps->bfa, lps->reqq);
1626 bfa_assert(m);
1628 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1629 bfa_lpuid(lps->bfa));
1631 m->lp_tag = lps->lp_tag;
1632 m->alpa = lps->alpa;
1633 m->pdu_size = cpu_to_be16(lps->pdusz);
1634 m->pwwn = lps->pwwn;
1635 m->nwwn = lps->nwwn;
1636 m->fdisc = lps->fdisc;
1637 m->auth_en = lps->auth_en;
1639 bfa_reqq_produce(lps->bfa, lps->reqq);
1643 * send logout request to firmware
1645 static void
1646 bfa_lps_send_logout(struct bfa_lps_s *lps)
1648 struct bfi_lps_logout_req_s *m;
1650 m = bfa_reqq_next(lps->bfa, lps->reqq);
1651 bfa_assert(m);
1653 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1654 bfa_lpuid(lps->bfa));
1656 m->lp_tag = lps->lp_tag;
1657 m->port_name = lps->pwwn;
1658 bfa_reqq_produce(lps->bfa, lps->reqq);
1662 * Indirect login completion handler for non-fcs
1664 static void
1665 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1667 struct bfa_lps_s *lps = arg;
1669 if (!complete)
1670 return;
1672 if (lps->fdisc)
1673 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1674 else
1675 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1679 * Login completion handler -- direct call for fcs, queue for others
1681 static void
1682 bfa_lps_login_comp(struct bfa_lps_s *lps)
1684 if (!lps->bfa->fcs) {
1685 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1686 lps);
1687 return;
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1692 else
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1697 * Indirect logout completion handler for non-fcs
1699 static void
1700 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1702 struct bfa_lps_s *lps = arg;
1704 if (!complete)
1705 return;
1707 if (lps->fdisc)
1708 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1712 * Logout completion handler -- direct call for fcs, queue for others
1714 static void
1715 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1717 if (!lps->bfa->fcs) {
1718 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1719 lps);
1720 return;
1722 if (lps->fdisc)
1723 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1727 * Clear virtual link completion handler for non-fcs
1729 static void
1730 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1732 struct bfa_lps_s *lps = arg;
1734 if (!complete)
1735 return;
1737 /* Clear virtual link to base port will result in link down */
1738 if (lps->fdisc)
1739 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1743 * Received Clear virtual link event --direct call for fcs,
1744 * queue for others
1746 static void
1747 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1749 if (!lps->bfa->fcs) {
1750 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1751 lps);
1752 return;
1755 /* Clear virtual link to base port will result in link down */
1756 if (lps->fdisc)
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1763 * lps_public BFA LPS public functions
1767 bfa_lps_get_max_vport(struct bfa_s *bfa)
1769 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1770 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1771 else
1772 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1776 * Allocate a lport srvice tag.
1778 struct bfa_lps_s *
1779 bfa_lps_alloc(struct bfa_s *bfa)
1781 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1782 struct bfa_lps_s *lps = NULL;
1784 bfa_q_deq(&mod->lps_free_q, &lps);
1786 if (lps == NULL)
1787 return NULL;
1789 list_add_tail(&lps->qe, &mod->lps_active_q);
1791 bfa_sm_set_state(lps, bfa_lps_sm_init);
1792 return lps;
1796 * Free lport service tag. This can be called anytime after an alloc.
1797 * No need to wait for any pending login/logout completions.
1799 void
1800 bfa_lps_delete(struct bfa_lps_s *lps)
1802 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1806 * Initiate a lport login.
1808 void
1809 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1810 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1812 lps->uarg = uarg;
1813 lps->alpa = alpa;
1814 lps->pdusz = pdusz;
1815 lps->pwwn = pwwn;
1816 lps->nwwn = nwwn;
1817 lps->fdisc = BFA_FALSE;
1818 lps->auth_en = auth_en;
1819 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1823 * Initiate a lport fdisc login.
1825 void
1826 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1827 wwn_t nwwn)
1829 lps->uarg = uarg;
1830 lps->alpa = 0;
1831 lps->pdusz = pdusz;
1832 lps->pwwn = pwwn;
1833 lps->nwwn = nwwn;
1834 lps->fdisc = BFA_TRUE;
1835 lps->auth_en = BFA_FALSE;
1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1841 * Initiate a lport FDSIC logout.
1843 void
1844 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1846 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1850 * Discard a pending login request -- should be called only for
1851 * link down handling.
1853 void
1854 bfa_lps_discard(struct bfa_lps_s *lps)
1856 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1860 * Return lport services tag
1863 bfa_lps_get_tag(struct bfa_lps_s *lps)
1865 return lps->lp_tag;
1869 * Return lport services tag given the pid
1872 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1874 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1875 struct bfa_lps_s *lps;
1876 int i;
1878 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1879 if (lps->lp_pid == pid)
1880 return lps->lp_tag;
1883 /* Return base port tag anyway */
1884 return 0;
1888 * return if fabric login indicates support for NPIV
1890 bfa_boolean_t
1891 bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1893 return lps->npiv_en;
1897 * Return TRUE if attached to F-Port, else return FALSE
1899 bfa_boolean_t
1900 bfa_lps_is_fport(struct bfa_lps_s *lps)
1902 return lps->fport;
1906 * Return TRUE if attached to a Brocade Fabric
1908 bfa_boolean_t
1909 bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1911 return lps->brcd_switch;
1914 * return TRUE if authentication is required
1916 bfa_boolean_t
1917 bfa_lps_is_authreq(struct bfa_lps_s *lps)
1919 return lps->auth_req;
1922 bfa_eproto_status_t
1923 bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1925 return lps->ext_status;
1929 * return port id assigned to the lport
1932 bfa_lps_get_pid(struct bfa_lps_s *lps)
1934 return lps->lp_pid;
1938 * return port id assigned to the base lport
1941 bfa_lps_get_base_pid(struct bfa_s *bfa)
1943 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1945 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1949 * Return bb_credit assigned in FLOGI response
1952 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1954 return lps->pr_bbcred;
1958 * Return peer port name
1960 wwn_t
1961 bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1963 return lps->pr_pwwn;
1967 * Return peer node name
1969 wwn_t
1970 bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1972 return lps->pr_nwwn;
1976 * return reason code if login request is rejected
1979 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
1981 return lps->lsrjt_rsn;
1985 * return explanation code if login request is rejected
1988 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
1990 return lps->lsrjt_expl;
1994 * Return fpma/spma MAC for lport
1996 mac_t
1997 bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
1999 return lps->lp_mac;
2003 * LPS firmware message class handler.
2005 void
2006 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2008 union bfi_lps_i2h_msg_u msg;
2010 bfa_trc(bfa, m->mhdr.msg_id);
2011 msg.msg = m;
2013 switch (m->mhdr.msg_id) {
2014 case BFI_LPS_H2I_LOGIN_RSP:
2015 bfa_lps_login_rsp(bfa, msg.login_rsp);
2016 break;
2018 case BFI_LPS_H2I_LOGOUT_RSP:
2019 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2020 break;
2022 case BFI_LPS_H2I_CVL_EVENT:
2023 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2024 break;
2026 default:
2027 bfa_trc(bfa, m->mhdr.msg_id);
2028 bfa_assert(0);
2033 * FC PORT state machine functions
2035 static void
2036 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2037 enum bfa_fcport_sm_event event)
2039 bfa_trc(fcport->bfa, event);
2041 switch (event) {
2042 case BFA_FCPORT_SM_START:
2044 * Start event after IOC is configured and BFA is started.
2046 if (bfa_fcport_send_enable(fcport)) {
2047 bfa_trc(fcport->bfa, BFA_TRUE);
2048 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2049 } else {
2050 bfa_trc(fcport->bfa, BFA_FALSE);
2051 bfa_sm_set_state(fcport,
2052 bfa_fcport_sm_enabling_qwait);
2054 break;
2056 case BFA_FCPORT_SM_ENABLE:
2058 * Port is persistently configured to be in enabled state. Do
2059 * not change state. Port enabling is done when START event is
2060 * received.
2062 break;
2064 case BFA_FCPORT_SM_DISABLE:
2066 * If a port is persistently configured to be disabled, the
2067 * first event will a port disable request.
2069 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2070 break;
2072 case BFA_FCPORT_SM_HWFAIL:
2073 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2074 break;
2076 default:
2077 bfa_sm_fault(fcport->bfa, event);
2081 static void
2082 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2083 enum bfa_fcport_sm_event event)
2085 char pwwn_buf[BFA_STRING_32];
2086 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2087 bfa_trc(fcport->bfa, event);
2089 switch (event) {
2090 case BFA_FCPORT_SM_QRESUME:
2091 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2092 bfa_fcport_send_enable(fcport);
2093 break;
2095 case BFA_FCPORT_SM_STOP:
2096 bfa_reqq_wcancel(&fcport->reqq_wait);
2097 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2098 break;
2100 case BFA_FCPORT_SM_ENABLE:
2102 * Already enable is in progress.
2104 break;
2106 case BFA_FCPORT_SM_DISABLE:
2108 * Just send disable request to firmware when room becomes
2109 * available in request queue.
2111 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2112 bfa_reqq_wcancel(&fcport->reqq_wait);
2113 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2114 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2115 wwn2str(pwwn_buf, fcport->pwwn);
2116 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2117 "Base port disabled: WWN = %s\n", pwwn_buf);
2118 break;
2120 case BFA_FCPORT_SM_LINKUP:
2121 case BFA_FCPORT_SM_LINKDOWN:
2123 * Possible to get link events when doing back-to-back
2124 * enable/disables.
2126 break;
2128 case BFA_FCPORT_SM_HWFAIL:
2129 bfa_reqq_wcancel(&fcport->reqq_wait);
2130 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2131 break;
2133 default:
2134 bfa_sm_fault(fcport->bfa, event);
2138 static void
2139 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2140 enum bfa_fcport_sm_event event)
2142 char pwwn_buf[BFA_STRING_32];
2143 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2144 bfa_trc(fcport->bfa, event);
2146 switch (event) {
2147 case BFA_FCPORT_SM_FWRSP:
2148 case BFA_FCPORT_SM_LINKDOWN:
2149 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2150 break;
2152 case BFA_FCPORT_SM_LINKUP:
2153 bfa_fcport_update_linkinfo(fcport);
2154 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2156 bfa_assert(fcport->event_cbfn);
2157 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2158 break;
2160 case BFA_FCPORT_SM_ENABLE:
2162 * Already being enabled.
2164 break;
2166 case BFA_FCPORT_SM_DISABLE:
2167 if (bfa_fcport_send_disable(fcport))
2168 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2169 else
2170 bfa_sm_set_state(fcport,
2171 bfa_fcport_sm_disabling_qwait);
2173 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2174 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2175 wwn2str(pwwn_buf, fcport->pwwn);
2176 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2177 "Base port disabled: WWN = %s\n", pwwn_buf);
2178 break;
2180 case BFA_FCPORT_SM_STOP:
2181 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2182 break;
2184 case BFA_FCPORT_SM_HWFAIL:
2185 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2186 break;
2188 default:
2189 bfa_sm_fault(fcport->bfa, event);
2193 static void
2194 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2195 enum bfa_fcport_sm_event event)
2197 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2198 char pwwn_buf[BFA_STRING_32];
2199 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2201 bfa_trc(fcport->bfa, event);
2203 switch (event) {
2204 case BFA_FCPORT_SM_LINKUP:
2205 bfa_fcport_update_linkinfo(fcport);
2206 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2207 bfa_assert(fcport->event_cbfn);
2208 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2209 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2210 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2212 bfa_trc(fcport->bfa,
2213 pevent->link_state.vc_fcf.fcf.fipenabled);
2214 bfa_trc(fcport->bfa,
2215 pevent->link_state.vc_fcf.fcf.fipfailed);
2217 if (pevent->link_state.vc_fcf.fcf.fipfailed)
2218 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2219 BFA_PL_EID_FIP_FCF_DISC, 0,
2220 "FIP FCF Discovery Failed");
2221 else
2222 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2223 BFA_PL_EID_FIP_FCF_DISC, 0,
2224 "FIP FCF Discovered");
2227 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2228 wwn2str(pwwn_buf, fcport->pwwn);
2229 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2230 "Base port online: WWN = %s\n", pwwn_buf);
2231 break;
2233 case BFA_FCPORT_SM_LINKDOWN:
2235 * Possible to get link down event.
2237 break;
2239 case BFA_FCPORT_SM_ENABLE:
2241 * Already enabled.
2243 break;
2245 case BFA_FCPORT_SM_DISABLE:
2246 if (bfa_fcport_send_disable(fcport))
2247 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2248 else
2249 bfa_sm_set_state(fcport,
2250 bfa_fcport_sm_disabling_qwait);
2252 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2253 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2254 wwn2str(pwwn_buf, fcport->pwwn);
2255 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2256 "Base port disabled: WWN = %s\n", pwwn_buf);
2257 break;
2259 case BFA_FCPORT_SM_STOP:
2260 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2261 break;
2263 case BFA_FCPORT_SM_HWFAIL:
2264 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2265 break;
2267 default:
2268 bfa_sm_fault(fcport->bfa, event);
2272 static void
2273 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2274 enum bfa_fcport_sm_event event)
2276 char pwwn_buf[BFA_STRING_32];
2277 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2279 bfa_trc(fcport->bfa, event);
2281 switch (event) {
2282 case BFA_FCPORT_SM_ENABLE:
2284 * Already enabled.
2286 break;
2288 case BFA_FCPORT_SM_DISABLE:
2289 if (bfa_fcport_send_disable(fcport))
2290 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2291 else
2292 bfa_sm_set_state(fcport,
2293 bfa_fcport_sm_disabling_qwait);
2295 bfa_fcport_reset_linkinfo(fcport);
2296 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2297 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2298 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2299 wwn2str(pwwn_buf, fcport->pwwn);
2300 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2301 "Base port offline: WWN = %s\n", pwwn_buf);
2302 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2303 "Base port disabled: WWN = %s\n", pwwn_buf);
2304 break;
2306 case BFA_FCPORT_SM_LINKDOWN:
2307 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2308 bfa_fcport_reset_linkinfo(fcport);
2309 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2310 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2311 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2312 wwn2str(pwwn_buf, fcport->pwwn);
2313 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2314 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2315 "Base port offline: WWN = %s\n", pwwn_buf);
2316 else
2317 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2318 "Base port (WWN = %s) "
2319 "lost fabric connectivity\n", pwwn_buf);
2320 break;
2322 case BFA_FCPORT_SM_STOP:
2323 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2324 bfa_fcport_reset_linkinfo(fcport);
2325 wwn2str(pwwn_buf, fcport->pwwn);
2326 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2327 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2328 "Base port offline: WWN = %s\n", pwwn_buf);
2329 else
2330 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2331 "Base port (WWN = %s) "
2332 "lost fabric connectivity\n", pwwn_buf);
2333 break;
2335 case BFA_FCPORT_SM_HWFAIL:
2336 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2337 bfa_fcport_reset_linkinfo(fcport);
2338 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2339 wwn2str(pwwn_buf, fcport->pwwn);
2340 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2341 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2342 "Base port offline: WWN = %s\n", pwwn_buf);
2343 else
2344 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2345 "Base port (WWN = %s) "
2346 "lost fabric connectivity\n", pwwn_buf);
2347 break;
2349 default:
2350 bfa_sm_fault(fcport->bfa, event);
2354 static void
2355 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2356 enum bfa_fcport_sm_event event)
2358 bfa_trc(fcport->bfa, event);
2360 switch (event) {
2361 case BFA_FCPORT_SM_QRESUME:
2362 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2363 bfa_fcport_send_disable(fcport);
2364 break;
2366 case BFA_FCPORT_SM_STOP:
2367 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2368 bfa_reqq_wcancel(&fcport->reqq_wait);
2369 break;
2371 case BFA_FCPORT_SM_ENABLE:
2372 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2373 break;
2375 case BFA_FCPORT_SM_DISABLE:
2377 * Already being disabled.
2379 break;
2381 case BFA_FCPORT_SM_LINKUP:
2382 case BFA_FCPORT_SM_LINKDOWN:
2384 * Possible to get link events when doing back-to-back
2385 * enable/disables.
2387 break;
2389 case BFA_FCPORT_SM_HWFAIL:
2390 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2391 bfa_reqq_wcancel(&fcport->reqq_wait);
2392 break;
2394 default:
2395 bfa_sm_fault(fcport->bfa, event);
2399 static void
2400 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2401 enum bfa_fcport_sm_event event)
2403 bfa_trc(fcport->bfa, event);
2405 switch (event) {
2406 case BFA_FCPORT_SM_QRESUME:
2407 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2408 bfa_fcport_send_disable(fcport);
2409 if (bfa_fcport_send_enable(fcport))
2410 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2411 else
2412 bfa_sm_set_state(fcport,
2413 bfa_fcport_sm_enabling_qwait);
2414 break;
2416 case BFA_FCPORT_SM_STOP:
2417 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2418 bfa_reqq_wcancel(&fcport->reqq_wait);
2419 break;
2421 case BFA_FCPORT_SM_ENABLE:
2422 break;
2424 case BFA_FCPORT_SM_DISABLE:
2425 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2426 break;
2428 case BFA_FCPORT_SM_LINKUP:
2429 case BFA_FCPORT_SM_LINKDOWN:
2431 * Possible to get link events when doing back-to-back
2432 * enable/disables.
2434 break;
2436 case BFA_FCPORT_SM_HWFAIL:
2437 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2438 bfa_reqq_wcancel(&fcport->reqq_wait);
2439 break;
2441 default:
2442 bfa_sm_fault(fcport->bfa, event);
2446 static void
2447 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2448 enum bfa_fcport_sm_event event)
2450 char pwwn_buf[BFA_STRING_32];
2451 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2452 bfa_trc(fcport->bfa, event);
2454 switch (event) {
2455 case BFA_FCPORT_SM_FWRSP:
2456 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2457 break;
2459 case BFA_FCPORT_SM_DISABLE:
2461 * Already being disabled.
2463 break;
2465 case BFA_FCPORT_SM_ENABLE:
2466 if (bfa_fcport_send_enable(fcport))
2467 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2468 else
2469 bfa_sm_set_state(fcport,
2470 bfa_fcport_sm_enabling_qwait);
2472 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2473 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2474 wwn2str(pwwn_buf, fcport->pwwn);
2475 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2476 "Base port enabled: WWN = %s\n", pwwn_buf);
2477 break;
2479 case BFA_FCPORT_SM_STOP:
2480 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2481 break;
2483 case BFA_FCPORT_SM_LINKUP:
2484 case BFA_FCPORT_SM_LINKDOWN:
2486 * Possible to get link events when doing back-to-back
2487 * enable/disables.
2489 break;
2491 case BFA_FCPORT_SM_HWFAIL:
2492 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2493 break;
2495 default:
2496 bfa_sm_fault(fcport->bfa, event);
2500 static void
2501 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2502 enum bfa_fcport_sm_event event)
2504 char pwwn_buf[BFA_STRING_32];
2505 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2506 bfa_trc(fcport->bfa, event);
2508 switch (event) {
2509 case BFA_FCPORT_SM_START:
2511 * Ignore start event for a port that is disabled.
2513 break;
2515 case BFA_FCPORT_SM_STOP:
2516 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2517 break;
2519 case BFA_FCPORT_SM_ENABLE:
2520 if (bfa_fcport_send_enable(fcport))
2521 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2522 else
2523 bfa_sm_set_state(fcport,
2524 bfa_fcport_sm_enabling_qwait);
2526 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2527 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2528 wwn2str(pwwn_buf, fcport->pwwn);
2529 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2530 "Base port enabled: WWN = %s\n", pwwn_buf);
2531 break;
2533 case BFA_FCPORT_SM_DISABLE:
2535 * Already disabled.
2537 break;
2539 case BFA_FCPORT_SM_HWFAIL:
2540 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2541 break;
2543 default:
2544 bfa_sm_fault(fcport->bfa, event);
2548 static void
2549 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2550 enum bfa_fcport_sm_event event)
2552 bfa_trc(fcport->bfa, event);
2554 switch (event) {
2555 case BFA_FCPORT_SM_START:
2556 if (bfa_fcport_send_enable(fcport))
2557 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2558 else
2559 bfa_sm_set_state(fcport,
2560 bfa_fcport_sm_enabling_qwait);
2561 break;
2563 default:
2565 * Ignore all other events.
2572 * Port is enabled. IOC is down/failed.
2574 static void
2575 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2576 enum bfa_fcport_sm_event event)
2578 bfa_trc(fcport->bfa, event);
2580 switch (event) {
2581 case BFA_FCPORT_SM_START:
2582 if (bfa_fcport_send_enable(fcport))
2583 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2584 else
2585 bfa_sm_set_state(fcport,
2586 bfa_fcport_sm_enabling_qwait);
2587 break;
2589 default:
2591 * Ignore all events.
2598 * Port is disabled. IOC is down/failed.
2600 static void
2601 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2602 enum bfa_fcport_sm_event event)
2604 bfa_trc(fcport->bfa, event);
2606 switch (event) {
2607 case BFA_FCPORT_SM_START:
2608 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2609 break;
2611 case BFA_FCPORT_SM_ENABLE:
2612 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2613 break;
2615 default:
2617 * Ignore all events.
2624 * Link state is down
2626 static void
2627 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2628 enum bfa_fcport_ln_sm_event event)
2630 bfa_trc(ln->fcport->bfa, event);
2632 switch (event) {
2633 case BFA_FCPORT_LN_SM_LINKUP:
2634 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2635 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2636 break;
2638 default:
2639 bfa_sm_fault(ln->fcport->bfa, event);
2644 * Link state is waiting for down notification
2646 static void
2647 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2648 enum bfa_fcport_ln_sm_event event)
2650 bfa_trc(ln->fcport->bfa, event);
2652 switch (event) {
2653 case BFA_FCPORT_LN_SM_LINKUP:
2654 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2655 break;
2657 case BFA_FCPORT_LN_SM_NOTIFICATION:
2658 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2659 break;
2661 default:
2662 bfa_sm_fault(ln->fcport->bfa, event);
2667 * Link state is waiting for down notification and there is a pending up
2669 static void
2670 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2671 enum bfa_fcport_ln_sm_event event)
2673 bfa_trc(ln->fcport->bfa, event);
2675 switch (event) {
2676 case BFA_FCPORT_LN_SM_LINKDOWN:
2677 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2678 break;
2680 case BFA_FCPORT_LN_SM_NOTIFICATION:
2681 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2682 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2683 break;
2685 default:
2686 bfa_sm_fault(ln->fcport->bfa, event);
2691 * Link state is up
2693 static void
2694 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2695 enum bfa_fcport_ln_sm_event event)
2697 bfa_trc(ln->fcport->bfa, event);
2699 switch (event) {
2700 case BFA_FCPORT_LN_SM_LINKDOWN:
2701 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2702 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2703 break;
2705 default:
2706 bfa_sm_fault(ln->fcport->bfa, event);
2711 * Link state is waiting for up notification
2713 static void
2714 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2715 enum bfa_fcport_ln_sm_event event)
2717 bfa_trc(ln->fcport->bfa, event);
2719 switch (event) {
2720 case BFA_FCPORT_LN_SM_LINKDOWN:
2721 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2722 break;
2724 case BFA_FCPORT_LN_SM_NOTIFICATION:
2725 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2726 break;
2728 default:
2729 bfa_sm_fault(ln->fcport->bfa, event);
2734 * Link state is waiting for up notification and there is a pending down
2736 static void
2737 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2738 enum bfa_fcport_ln_sm_event event)
2740 bfa_trc(ln->fcport->bfa, event);
2742 switch (event) {
2743 case BFA_FCPORT_LN_SM_LINKUP:
2744 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2745 break;
2747 case BFA_FCPORT_LN_SM_NOTIFICATION:
2748 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2749 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2750 break;
2752 default:
2753 bfa_sm_fault(ln->fcport->bfa, event);
2758 * Link state is waiting for up notification and there are pending down and up
2760 static void
2761 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2762 enum bfa_fcport_ln_sm_event event)
2764 bfa_trc(ln->fcport->bfa, event);
2766 switch (event) {
2767 case BFA_FCPORT_LN_SM_LINKDOWN:
2768 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2769 break;
2771 case BFA_FCPORT_LN_SM_NOTIFICATION:
2772 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2773 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2774 break;
2776 default:
2777 bfa_sm_fault(ln->fcport->bfa, event);
2784 * hal_port_private
2787 static void
2788 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2790 struct bfa_fcport_ln_s *ln = cbarg;
2792 if (complete)
2793 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2794 else
2795 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2799 * Send SCN notification to upper layers.
2800 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2802 static void
2803 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2804 bfa_boolean_t trunk)
2806 if (fcport->cfg.trunked && !trunk)
2807 return;
2809 switch (event) {
2810 case BFA_PORT_LINKUP:
2811 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2812 break;
2813 case BFA_PORT_LINKDOWN:
2814 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2815 break;
2816 default:
2817 bfa_assert(0);
2821 static void
2822 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2824 struct bfa_fcport_s *fcport = ln->fcport;
2826 if (fcport->bfa->fcs) {
2827 fcport->event_cbfn(fcport->event_cbarg, event);
2828 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2829 } else {
2830 ln->ln_event = event;
2831 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2832 __bfa_cb_fcport_event, ln);
2836 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2837 BFA_CACHELINE_SZ))
2839 static void
2840 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2841 u32 *dm_len)
2843 *dm_len += FCPORT_STATS_DMA_SZ;
2846 static void
2847 bfa_fcport_qresume(void *cbarg)
2849 struct bfa_fcport_s *fcport = cbarg;
2851 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2854 static void
2855 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2857 u8 *dm_kva;
2858 u64 dm_pa;
2860 dm_kva = bfa_meminfo_dma_virt(meminfo);
2861 dm_pa = bfa_meminfo_dma_phys(meminfo);
2863 fcport->stats_kva = dm_kva;
2864 fcport->stats_pa = dm_pa;
2865 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2867 dm_kva += FCPORT_STATS_DMA_SZ;
2868 dm_pa += FCPORT_STATS_DMA_SZ;
2870 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2871 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2875 * Memory initialization.
2877 static void
2878 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2879 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2881 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2882 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2883 struct bfa_fcport_ln_s *ln = &fcport->ln;
2884 struct bfa_timeval_s tv;
2886 memset(fcport, 0, sizeof(struct bfa_fcport_s));
2887 fcport->bfa = bfa;
2888 ln->fcport = fcport;
2890 bfa_fcport_mem_claim(fcport, meminfo);
2892 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2893 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2896 * initialize time stamp for stats reset
2898 bfa_os_gettimeofday(&tv);
2899 fcport->stats_reset_time = tv.tv_sec;
2902 * initialize and set default configuration
2904 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2905 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2906 port_cfg->trunked = BFA_FALSE;
2907 port_cfg->maxfrsize = 0;
2909 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2911 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/*
 * Module detach: fcport has nothing to tear down.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2920 * Called when IOC is ready.
2922 static void
2923 bfa_fcport_start(struct bfa_s *bfa)
2925 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2929 * Called before IOC is stopped.
2931 static void
2932 bfa_fcport_stop(struct bfa_s *bfa)
2934 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2935 bfa_trunk_iocdisable(bfa);
2939 * Called when IOC failure is detected.
2941 static void
2942 bfa_fcport_iocdisable(struct bfa_s *bfa)
2944 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2946 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2947 bfa_trunk_iocdisable(bfa);
2950 static void
2951 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2953 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2954 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2956 fcport->speed = pevent->link_state.speed;
2957 fcport->topology = pevent->link_state.topology;
2959 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2960 fcport->myalpa = 0;
2962 /* QoS Details */
2963 fcport->qos_attr = pevent->link_state.qos_attr;
2964 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2967 * update trunk state if applicable
2969 if (!fcport->cfg.trunked)
2970 trunk->attr.state = BFA_TRUNK_DISABLED;
2972 /* update FCoE specific */
2973 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2975 bfa_trc(fcport->bfa, fcport->speed);
2976 bfa_trc(fcport->bfa, fcport->topology);
2979 static void
2980 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2982 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2983 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2987 * Send port enable message to firmware.
2989 static bfa_boolean_t
2990 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2992 struct bfi_fcport_enable_req_s *m;
2995 * Increment message tag before queue check, so that responses to old
2996 * requests are discarded.
2998 fcport->msgtag++;
3001 * check for room in queue to send request now
3003 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3004 if (!m) {
3005 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3006 &fcport->reqq_wait);
3007 return BFA_FALSE;
3010 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3011 bfa_lpuid(fcport->bfa));
3012 m->nwwn = fcport->nwwn;
3013 m->pwwn = fcport->pwwn;
3014 m->port_cfg = fcport->cfg;
3015 m->msgtag = fcport->msgtag;
3016 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3017 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3018 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3019 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3022 * queue I/O message to firmware
3024 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3025 return BFA_TRUE;
3029 * Send port disable message to firmware.
3031 static bfa_boolean_t
3032 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3034 struct bfi_fcport_req_s *m;
3037 * Increment message tag before queue check, so that responses to old
3038 * requests are discarded.
3040 fcport->msgtag++;
3043 * check for room in queue to send request now
3045 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3046 if (!m) {
3047 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3048 &fcport->reqq_wait);
3049 return BFA_FALSE;
3052 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3053 bfa_lpuid(fcport->bfa));
3054 m->msgtag = fcport->msgtag;
3057 * queue I/O message to firmware
3059 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3061 return BFA_TRUE;
3064 static void
3065 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3067 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
3068 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
3070 bfa_trc(fcport->bfa, fcport->pwwn);
3071 bfa_trc(fcport->bfa, fcport->nwwn);
3074 static void
3075 bfa_fcport_send_txcredit(void *port_cbarg)
3078 struct bfa_fcport_s *fcport = port_cbarg;
3079 struct bfi_fcport_set_svc_params_req_s *m;
3082 * check for room in queue to send request now
3084 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3085 if (!m) {
3086 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3087 return;
3090 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3091 bfa_lpuid(fcport->bfa));
3092 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3095 * queue I/O message to firmware
3097 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3100 static void
3101 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3102 struct bfa_qos_stats_s *s)
3104 u32 *dip = (u32 *) d;
3105 __be32 *sip = (__be32 *) s;
3106 int i;
3108 /* Now swap the 32 bit fields */
3109 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3110 dip[i] = be32_to_cpu(sip[i]);
3113 static void
3114 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3115 struct bfa_fcoe_stats_s *s)
3117 u32 *dip = (u32 *) d;
3118 __be32 *sip = (__be32 *) s;
3119 int i;
3121 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3122 i = i + 2) {
3123 #ifdef __BIGENDIAN
3124 dip[i] = be32_to_cpu(sip[i]);
3125 dip[i + 1] = be32_to_cpu(sip[i + 1]);
3126 #else
3127 dip[i] = be32_to_cpu(sip[i + 1]);
3128 dip[i + 1] = be32_to_cpu(sip[i]);
3129 #endif
3133 static void
3134 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3136 struct bfa_fcport_s *fcport = cbarg;
3138 if (complete) {
3139 if (fcport->stats_status == BFA_STATUS_OK) {
3140 struct bfa_timeval_s tv;
3142 /* Swap FC QoS or FCoE stats */
3143 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
3144 bfa_fcport_qos_stats_swap(
3145 &fcport->stats_ret->fcqos,
3146 &fcport->stats->fcqos);
3147 } else {
3148 bfa_fcport_fcoe_stats_swap(
3149 &fcport->stats_ret->fcoe,
3150 &fcport->stats->fcoe);
3152 bfa_os_gettimeofday(&tv);
3153 fcport->stats_ret->fcoe.secs_reset =
3154 tv.tv_sec - fcport->stats_reset_time;
3157 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3158 } else {
3159 fcport->stats_busy = BFA_FALSE;
3160 fcport->stats_status = BFA_STATUS_OK;
3164 static void
3165 bfa_fcport_stats_get_timeout(void *cbarg)
3167 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3169 bfa_trc(fcport->bfa, fcport->stats_qfull);
3171 if (fcport->stats_qfull) {
3172 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3173 fcport->stats_qfull = BFA_FALSE;
3176 fcport->stats_status = BFA_STATUS_ETIMER;
3177 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
3178 fcport);
3181 static void
3182 bfa_fcport_send_stats_get(void *cbarg)
3184 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3185 struct bfi_fcport_req_s *msg;
3187 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3189 if (!msg) {
3190 fcport->stats_qfull = BFA_TRUE;
3191 bfa_reqq_winit(&fcport->stats_reqq_wait,
3192 bfa_fcport_send_stats_get, fcport);
3193 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3194 &fcport->stats_reqq_wait);
3195 return;
3197 fcport->stats_qfull = BFA_FALSE;
3199 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3200 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3201 bfa_lpuid(fcport->bfa));
3202 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3205 static void
3206 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3208 struct bfa_fcport_s *fcport = cbarg;
3210 if (complete) {
3211 struct bfa_timeval_s tv;
3214 * re-initialize time stamp for stats reset
3216 bfa_os_gettimeofday(&tv);
3217 fcport->stats_reset_time = tv.tv_sec;
3219 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3220 } else {
3221 fcport->stats_busy = BFA_FALSE;
3222 fcport->stats_status = BFA_STATUS_OK;
3226 static void
3227 bfa_fcport_stats_clr_timeout(void *cbarg)
3229 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3231 bfa_trc(fcport->bfa, fcport->stats_qfull);
3233 if (fcport->stats_qfull) {
3234 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3235 fcport->stats_qfull = BFA_FALSE;
3238 fcport->stats_status = BFA_STATUS_ETIMER;
3239 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3240 __bfa_cb_fcport_stats_clr, fcport);
3243 static void
3244 bfa_fcport_send_stats_clear(void *cbarg)
3246 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3247 struct bfi_fcport_req_s *msg;
3249 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3251 if (!msg) {
3252 fcport->stats_qfull = BFA_TRUE;
3253 bfa_reqq_winit(&fcport->stats_reqq_wait,
3254 bfa_fcport_send_stats_clear, fcport);
3255 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3256 &fcport->stats_reqq_wait);
3257 return;
3259 fcport->stats_qfull = BFA_FALSE;
3261 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3262 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3263 bfa_lpuid(fcport->bfa));
3264 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3268 * Handle trunk SCN event from firmware.
3270 static void
3271 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3273 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3274 struct bfi_fcport_trunk_link_s *tlink;
3275 struct bfa_trunk_link_attr_s *lattr;
3276 enum bfa_trunk_state state_prev;
3277 int i;
3278 int link_bm = 0;
3280 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3281 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3282 scn->trunk_state == BFA_TRUNK_OFFLINE);
3284 bfa_trc(fcport->bfa, trunk->attr.state);
3285 bfa_trc(fcport->bfa, scn->trunk_state);
3286 bfa_trc(fcport->bfa, scn->trunk_speed);
3289 * Save off new state for trunk attribute query
3291 state_prev = trunk->attr.state;
3292 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3293 trunk->attr.state = scn->trunk_state;
3294 trunk->attr.speed = scn->trunk_speed;
3295 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3296 lattr = &trunk->attr.link_attr[i];
3297 tlink = &scn->tlink[i];
3299 lattr->link_state = tlink->state;
3300 lattr->trunk_wwn = tlink->trunk_wwn;
3301 lattr->fctl = tlink->fctl;
3302 lattr->speed = tlink->speed;
3303 lattr->deskew = be32_to_cpu(tlink->deskew);
3305 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3306 fcport->speed = tlink->speed;
3307 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3308 link_bm |= 1 << i;
3311 bfa_trc(fcport->bfa, lattr->link_state);
3312 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3313 bfa_trc(fcport->bfa, lattr->fctl);
3314 bfa_trc(fcport->bfa, lattr->speed);
3315 bfa_trc(fcport->bfa, lattr->deskew);
3318 switch (link_bm) {
3319 case 3:
3320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3321 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3322 break;
3323 case 2:
3324 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3325 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3326 break;
3327 case 1:
3328 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3329 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3330 break;
3331 default:
3332 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3333 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3337 * Notify upper layers if trunk state changed.
3339 if ((state_prev != trunk->attr.state) ||
3340 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3341 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3342 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3346 static void
3347 bfa_trunk_iocdisable(struct bfa_s *bfa)
3349 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3350 int i = 0;
3353 * In trunked mode, notify upper layers that link is down
3355 if (fcport->cfg.trunked) {
3356 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3357 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3359 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3360 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3361 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3362 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3363 fcport->trunk.attr.link_attr[i].fctl =
3364 BFA_TRUNK_LINK_FCTL_NORMAL;
3365 fcport->trunk.attr.link_attr[i].link_state =
3366 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3367 fcport->trunk.attr.link_attr[i].speed =
3368 BFA_PORT_SPEED_UNKNOWN;
3369 fcport->trunk.attr.link_attr[i].deskew = 0;
3377 * hal_port_public
3381 * Called to initialize port attributes
3383 void
3384 bfa_fcport_init(struct bfa_s *bfa)
3386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3389 * Initialize port attributes from IOC hardware data.
3391 bfa_fcport_set_wwns(fcport);
3392 if (fcport->cfg.maxfrsize == 0)
3393 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3394 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3395 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3397 bfa_assert(fcport->cfg.maxfrsize);
3398 bfa_assert(fcport->cfg.rx_bbcredit);
3399 bfa_assert(fcport->speed_sup);
3403 * Firmware message handler.
3405 void
3406 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3408 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3409 union bfi_fcport_i2h_msg_u i2hmsg;
3411 i2hmsg.msg = msg;
3412 fcport->event_arg.i2hmsg = i2hmsg;
3414 bfa_trc(bfa, msg->mhdr.msg_id);
3415 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3417 switch (msg->mhdr.msg_id) {
3418 case BFI_FCPORT_I2H_ENABLE_RSP:
3419 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3420 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3421 break;
3423 case BFI_FCPORT_I2H_DISABLE_RSP:
3424 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3425 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3426 break;
3428 case BFI_FCPORT_I2H_EVENT:
3429 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3430 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3431 else
3432 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3433 break;
3435 case BFI_FCPORT_I2H_TRUNK_SCN:
3436 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3437 break;
3439 case BFI_FCPORT_I2H_STATS_GET_RSP:
3441 * check for timer pop before processing the rsp
3443 if (fcport->stats_busy == BFA_FALSE ||
3444 fcport->stats_status == BFA_STATUS_ETIMER)
3445 break;
3447 bfa_timer_stop(&fcport->timer);
3448 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3449 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3450 __bfa_cb_fcport_stats_get, fcport);
3451 break;
3453 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3455 * check for timer pop before processing the rsp
3457 if (fcport->stats_busy == BFA_FALSE ||
3458 fcport->stats_status == BFA_STATUS_ETIMER)
3459 break;
3461 bfa_timer_stop(&fcport->timer);
3462 fcport->stats_status = BFA_STATUS_OK;
3463 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3464 __bfa_cb_fcport_stats_clr, fcport);
3465 break;
3467 case BFI_FCPORT_I2H_ENABLE_AEN:
3468 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3469 break;
3471 case BFI_FCPORT_I2H_DISABLE_AEN:
3472 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3473 break;
3475 default:
3476 bfa_assert(0);
3477 break;
3484 * hal_port_api
3488 * Registered callback for port events.
3490 void
3491 bfa_fcport_event_register(struct bfa_s *bfa,
3492 void (*cbfn) (void *cbarg,
3493 enum bfa_port_linkstate event),
3494 void *cbarg)
3496 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3498 fcport->event_cbfn = cbfn;
3499 fcport->event_cbarg = cbarg;
3502 bfa_status_t
3503 bfa_fcport_enable(struct bfa_s *bfa)
3505 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3507 if (bfa_ioc_is_disabled(&bfa->ioc))
3508 return BFA_STATUS_IOC_DISABLED;
3510 if (fcport->diag_busy)
3511 return BFA_STATUS_DIAG_BUSY;
3513 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3514 return BFA_STATUS_OK;
3517 bfa_status_t
3518 bfa_fcport_disable(struct bfa_s *bfa)
3521 if (bfa_ioc_is_disabled(&bfa->ioc))
3522 return BFA_STATUS_IOC_DISABLED;
3524 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3525 return BFA_STATUS_OK;
3529 * Configure port speed.
3531 bfa_status_t
3532 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3534 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3536 bfa_trc(bfa, speed);
3538 if (fcport->cfg.trunked == BFA_TRUE)
3539 return BFA_STATUS_TRUNK_ENABLED;
3540 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3541 bfa_trc(bfa, fcport->speed_sup);
3542 return BFA_STATUS_UNSUPP_SPEED;
3545 fcport->cfg.speed = speed;
3547 return BFA_STATUS_OK;
3551 * Get current speed.
3553 enum bfa_port_speed
3554 bfa_fcport_get_speed(struct bfa_s *bfa)
3556 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3558 return fcport->speed;
3562 * Configure port topology.
3564 bfa_status_t
3565 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3567 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3569 bfa_trc(bfa, topology);
3570 bfa_trc(bfa, fcport->cfg.topology);
3572 switch (topology) {
3573 case BFA_PORT_TOPOLOGY_P2P:
3574 case BFA_PORT_TOPOLOGY_LOOP:
3575 case BFA_PORT_TOPOLOGY_AUTO:
3576 break;
3578 default:
3579 return BFA_STATUS_EINVAL;
3582 fcport->cfg.topology = topology;
3583 return BFA_STATUS_OK;
3587 * Get current topology.
3589 enum bfa_port_topology
3590 bfa_fcport_get_topology(struct bfa_s *bfa)
3592 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3594 return fcport->topology;
3597 bfa_status_t
3598 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3600 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3602 bfa_trc(bfa, alpa);
3603 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3604 bfa_trc(bfa, fcport->cfg.hardalpa);
3606 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3607 fcport->cfg.hardalpa = alpa;
3609 return BFA_STATUS_OK;
3612 bfa_status_t
3613 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3615 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3617 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3618 bfa_trc(bfa, fcport->cfg.hardalpa);
3620 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3621 return BFA_STATUS_OK;
3624 bfa_boolean_t
3625 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3627 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3629 *alpa = fcport->cfg.hardalpa;
3630 return fcport->cfg.cfg_hardalpa;
3634 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3636 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3638 return fcport->myalpa;
3641 bfa_status_t
3642 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3644 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3646 bfa_trc(bfa, maxfrsize);
3647 bfa_trc(bfa, fcport->cfg.maxfrsize);
3649 /* with in range */
3650 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3651 return BFA_STATUS_INVLD_DFSZ;
3653 /* power of 2, if not the max frame size of 2112 */
3654 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3655 return BFA_STATUS_INVLD_DFSZ;
3657 fcport->cfg.maxfrsize = maxfrsize;
3658 return BFA_STATUS_OK;
3662 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3664 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3666 return fcport->cfg.maxfrsize;
3670 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3672 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3674 return fcport->cfg.rx_bbcredit;
3677 void
3678 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3680 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3682 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3683 bfa_fcport_send_txcredit(fcport);
3687 * Get port attributes.
3690 wwn_t
3691 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3693 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3694 if (node)
3695 return fcport->nwwn;
3696 else
3697 return fcport->pwwn;
3700 void
3701 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3703 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3705 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3707 attr->nwwn = fcport->nwwn;
3708 attr->pwwn = fcport->pwwn;
3710 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
3711 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
3713 memcpy(&attr->pport_cfg, &fcport->cfg,
3714 sizeof(struct bfa_port_cfg_s));
3715 /* speed attributes */
3716 attr->pport_cfg.speed = fcport->cfg.speed;
3717 attr->speed_supported = fcport->speed_sup;
3718 attr->speed = fcport->speed;
3719 attr->cos_supported = FC_CLASS_3;
3721 /* topology attributes */
3722 attr->pport_cfg.topology = fcport->cfg.topology;
3723 attr->topology = fcport->topology;
3724 attr->pport_cfg.trunked = fcport->cfg.trunked;
3726 /* beacon attributes */
3727 attr->beacon = fcport->beacon;
3728 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3729 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
3730 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3732 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3733 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
3734 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3735 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3736 attr->port_state = BFA_PORT_ST_IOCDIS;
3737 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3738 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3740 /* FCoE vlan */
3741 attr->fcoe_vlan = fcport->fcoe_vlan;
3744 #define BFA_FCPORT_STATS_TOV 1000
3747 * Fetch port statistics (FCQoS or FCoE).
3749 bfa_status_t
3750 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3751 bfa_cb_port_t cbfn, void *cbarg)
3753 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3755 if (fcport->stats_busy) {
3756 bfa_trc(bfa, fcport->stats_busy);
3757 return BFA_STATUS_DEVBUSY;
3760 fcport->stats_busy = BFA_TRUE;
3761 fcport->stats_ret = stats;
3762 fcport->stats_cbfn = cbfn;
3763 fcport->stats_cbarg = cbarg;
3765 bfa_fcport_send_stats_get(fcport);
3767 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3768 fcport, BFA_FCPORT_STATS_TOV);
3769 return BFA_STATUS_OK;
3773 * Reset port statistics (FCQoS or FCoE).
3775 bfa_status_t
3776 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3778 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3780 if (fcport->stats_busy) {
3781 bfa_trc(bfa, fcport->stats_busy);
3782 return BFA_STATUS_DEVBUSY;
3785 fcport->stats_busy = BFA_TRUE;
3786 fcport->stats_cbfn = cbfn;
3787 fcport->stats_cbarg = cbarg;
3789 bfa_fcport_send_stats_clear(fcport);
3791 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3792 fcport, BFA_FCPORT_STATS_TOV);
3793 return BFA_STATUS_OK;
3798 * Fetch port attributes.
3800 bfa_boolean_t
3801 bfa_fcport_is_disabled(struct bfa_s *bfa)
3803 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3805 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3806 BFA_PORT_ST_DISABLED;
3810 bfa_boolean_t
3811 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3813 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3815 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3821 * Get default minimum ratelim speed
3823 enum bfa_port_speed
3824 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3826 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3828 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3829 return fcport->cfg.trl_def_speed;
3833 bfa_boolean_t
3834 bfa_fcport_is_linkup(struct bfa_s *bfa)
3836 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3838 return (!fcport->cfg.trunked &&
3839 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3840 (fcport->cfg.trunked &&
3841 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3844 bfa_boolean_t
3845 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3847 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3849 return fcport->cfg.qos_enabled;
3853 * Rport State machine functions
3856 * Beginning state, only online event expected.
3858 static void
3859 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3861 bfa_trc(rp->bfa, rp->rport_tag);
3862 bfa_trc(rp->bfa, event);
3864 switch (event) {
3865 case BFA_RPORT_SM_CREATE:
3866 bfa_stats(rp, sm_un_cr);
3867 bfa_sm_set_state(rp, bfa_rport_sm_created);
3868 break;
3870 default:
3871 bfa_stats(rp, sm_un_unexp);
3872 bfa_sm_fault(rp->bfa, event);
/*
 * Created state: rport exists in the driver but not yet in firmware.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* Push a create to firmware; queue-full goes to the qfull state. */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Can't delete until create completes; remember the request. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Same for offline: defer until the create response arrives. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Create never reached firmware, so free directly. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		/* Speed change is fire-and-forget; no state transition. */
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* Firmware QoS state-change notification: snapshot the new
		 * attributes, then compare old vs. new to decide which driver
		 * callbacks (flow-id and/or priority change) to invoke.
		 */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive in wire (big-endian) order */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline into a full delete. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware delete could not be queued (request queue full);
 * waiting for queue resume to send it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Delete requested but the firmware delete message could not be queued;
 * waiting for queue resume.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed; now issue the deferred delete. */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
			     enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed; now issue the deferred offline (fwdelete). */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* No firmware to talk to; complete the offline locally. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the rport in firmware. */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already in the failed state; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4311 * bfa_rport_private BFA rport private functions
4314 static void
4315 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4317 struct bfa_rport_s *rp = cbarg;
4319 if (complete)
4320 bfa_cb_rport_online(rp->rport_drv);
4323 static void
4324 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4326 struct bfa_rport_s *rp = cbarg;
4328 if (complete)
4329 bfa_cb_rport_offline(rp->rport_drv);
/*
 * Request-queue space became available: resume the pending state machine
 * operation for this rport.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s *rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
/*
 * Compute kernel-virtual memory needed by the rport module and clamp the
 * configured rport count to the supported minimum.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
/*
 * Module attach: carve the rport array out of the pre-computed kva block,
 * initialize each rport and place it on the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * - is unused
		 * (tag 0 is reserved and never put on the free queue)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
/*
 * No-op: the rport module has nothing to release at detach time.
 * Present to satisfy the module template registered via BFA_MODULE(rport).
 */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
/*
 * No-op start hook for the BFA_MODULE(rport) template.
 */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
/*
 * No-op stop hook for the BFA_MODULE(rport) template.
 */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
/*
 * IOC disable: fan an HWFAIL event out to every active rport so each
 * state machine can unwind without firmware.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* _safe variant: HWFAIL handling may delete the entry from the list */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
/*
 * Take an rport from the free queue and move it to the active queue.
 * Returns NULL when the free queue is exhausted.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
/*
 * Return an rport to the free queue. The rport must currently be on the
 * active queue (asserted).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
/*
 * Queue an rport CREATE request to firmware.
 *
 * Returns BFA_TRUE when the message was queued; BFA_FALSE when the request
 * queue is full, in which case a queue-wait is armed and the caller's state
 * machine will get a QRESUME later.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz is the only multi-byte field; wire order is big-endian */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * Queue an rport DELETE request to firmware.
 *
 * Returns BFA_TRUE when queued; BFA_FALSE (with a queue-wait armed) when the
 * request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * Queue a SET_SPEED request to firmware.
 *
 * Unlike create/delete, a full request queue is not retried: the request is
 * simply dropped (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/*
 * bfa_rport_public
 */

/*
 * Rport interrupt processing: dispatch firmware responses/events to the
 * rport identified by the bfa_handle carried in the message.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* Stash the raw f/w message; the online-state handler parses it. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
/*
 * bfa_rport_api
 */

/*
 * Allocate and initialize an rport, then kick its state machine with a
 * CREATE event. Returns NULL when no free rport is available.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	bfa_rport_clear_stats(rp);

	/* a freshly allocated rport must be in the uninit state */
	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
/*
 * Request deletion of an rport; the state machine handles firmware
 * teardown and eventual freeing.
 */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
4601 void
4602 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4604 bfa_assert(rport_info->max_frmsz != 0);
4607 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4608 * responses. Default to minimum size.
4610 if (rport_info->max_frmsz == 0) {
4611 bfa_trc(rport->bfa, rport->rport_tag);
4612 rport_info->max_frmsz = FC_MIN_PDUSZ;
4615 rport->rport_info = *rport_info;
4616 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
/*
 * Take an rport offline; firmware teardown is driven by the state machine.
 */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
/*
 * Set the operating speed of an rport. Speed must be a concrete value
 * (non-zero and not AUTO) — both asserted.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
/*
 * Zero out the per-rport statistics counters.
 */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	memset(&rport->stats, 0, sizeof(rport->stats));
}
/*
 * SGPG related functions
 */

/*
 * Compute and return memory needed by FCP(im) module: one host-side
 * bfa_sgpg_s (kva) and one DMA-able bfi_sgpg_s per SG page, plus one
 * extra of each for alignment slack. Clamps num_sgpgs to the minimum.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;

	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
}
/*
 * Module attach: align the DMA region to the SGPG boundary, carve out the
 * host (hsgpg) and DMA (sgpg) arrays, pair them up, and put every hsgpg on
 * the free queue.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* union lets the 64-bit physical address be stored as a bfi address */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	/* round the physical base up; apply the same offset to kva/dva */
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* base must be sgpg-size aligned after the roundup above */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* record consumed memory back into the meminfo cursors */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
/*
 * No-op detach hook for the BFA_MODULE(sgpg) template.
 */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
/*
 * No-op start hook for the BFA_MODULE(sgpg) template.
 */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
/*
 * No-op stop hook for the BFA_MODULE(sgpg) template.
 */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
/*
 * No-op IOC-disable hook: SG pages hold no firmware state to unwind.
 */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
/*
 * hal_sgpg_public BFA SGPG public functions
 */

/*
 * Allocate nsgpgs SG pages onto the caller's sgpg_q.
 *
 * All-or-nothing: returns BFA_STATUS_ENOMEM without allocating anything
 * when fewer than nsgpgs pages are free, else BFA_STATUS_OK.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	bfa_trc_fp(bfa, nsgpgs);

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		/* free count said there are enough; dequeue cannot fail */
		bfa_assert(hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
/*
 * Return nsgpg SG pages from sgpg_q to the module free pool, then satisfy
 * as many queued waiters as the replenished pool allows (partial grants
 * keep the waiter queued with a reduced outstanding count).
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* fully satisfied: dequeue and complete the waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
/*
 * Queue a wait for nsgpg SG pages. Must only be called when the pool
 * cannot satisfy the request (asserted: nsgpg > free_sgpgs). Any pages
 * currently free are granted immediately; the remainder is left pending
 * on the wait queue and completed by bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
/*
 * Cancel a pending SG-page wait, returning any partially granted pages
 * (nsgpg_total - nsgpg outstanding) to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
4837 void
4838 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4839 void *cbarg)
4841 INIT_LIST_HEAD(&wqe->sgpg_q);
4842 wqe->cbfn = cbfn;
4843 wqe->cbarg = cbarg;
4847 * UF related functions
4850 *****************************************************************************
4851 * Internal functions
4852 *****************************************************************************
4854 static void
4855 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4857 struct bfa_uf_s *uf = cbarg;
4858 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4860 if (complete)
4861 ufm->ufrecv(ufm->cbarg, uf);
/*
 * Claim the DMA-able region holding the posted UF buffers and zero it;
 * advances the meminfo DMA cursors by the rounded-up total size.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* round up so the next claimant stays DMA aligned */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
4880 static void
4881 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4883 struct bfi_uf_buf_post_s *uf_bp_msg;
4884 struct bfi_sge_s *sge;
4885 union bfi_addr_u sga_zero = { {0} };
4886 u16 i;
4887 u16 buf_len;
4889 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4890 uf_bp_msg = ufm->uf_buf_posts;
4892 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4893 i++, uf_bp_msg++) {
4894 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4896 uf_bp_msg->buf_tag = i;
4897 buf_len = sizeof(struct bfa_uf_buf_s);
4898 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4899 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4900 bfa_lpuid(ufm->bfa));
4902 sge = uf_bp_msg->sge;
4903 sge[0].sg_len = buf_len;
4904 sge[0].flags = BFI_SGE_DATA_LAST;
4905 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4906 bfa_sge_to_be(sge);
4908 sge[1].sg_len = buf_len;
4909 sge[1].flags = BFI_SGE_PGDLEN;
4910 sge[1].sga = sga_zero;
4911 bfa_sge_to_be(&sge[1]);
4915 * advance pointer beyond consumed memory
4917 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
/*
 * Claim kva for the UF descriptor array, initialize each descriptor to
 * point at its posted DMA buffer, and queue all of them on the free queue.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
/*
 * Carve all UF-module memory out of the meminfo block.
 * Order matters: the DMA buffers must be claimed first, because both the
 * UF descriptors and the buffer-post messages record their addresses.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);	/* DMA-able posted buffers */
	claim_ufs(ufm, mi);	/* UF descriptors (kva) */
	claim_uf_post_msgs(ufm, mi);	/* pre-built buffer-post messages (kva) */
}
/*
 * Compute memory needed by the UF module: DMA-able space for the posted
 * buffers, kernel-virtual space for the descriptors and the pre-built
 * buffer-post message copies.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
							BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
/*
 * Module attach: reset module state, initialize the free/posted queues,
 * and claim all UF memory from the meminfo block.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
/*
 * No-op detach hook for the BFA_MODULE(uf) template.
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4996 static struct bfa_uf_s *
4997 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4999 struct bfa_uf_s *uf;
5001 bfa_q_deq(&uf_mod->uf_free_q, &uf);
5002 return uf;
/*
 * Return a UF descriptor to the free queue.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
/*
 * Post a single UF buffer to firmware by copying its pre-built buffer-post
 * message into the request queue.
 *
 * Returns BFA_STATUS_FAILED when the request queue is full (no wait is
 * armed; the caller simply stops posting), else BFA_STATUS_OK after moving
 * the UF onto the posted queue.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
/*
 * Post every free UF buffer to firmware, stopping at the first
 * request-queue-full failure (remaining UFs stay on the free queue;
 * note the failed UF itself ends up on the posted queue via bfa_uf_post).
 */
static void
bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s   *uf;

	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
			break;
	}
}
/*
 * Handle a frame-received event from firmware: locate the UF by buf_tag,
 * fix byte order of the lengths, log the FC header (plus first payload
 * word when present), and deliver the frame to the registered handler —
 * synchronously when the FCS runs in the same context, otherwise via the
 * deferred callback queue.
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	u8 *buf = &uf_buf->d[0];
	struct fchs_s *fchs;

	/* lengths arrive in wire (big-endian) order */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	bfa_assert(uf->data_len >= sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* header-only frame */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
/*
 * No-op stop hook for the BFA_MODULE(uf) template.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
/*
 * IOC disable: reclaim every UF outstanding with firmware back to the
 * free queue (firmware is gone, so they will never be delivered).
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* _safe variant: entries are removed inside the loop */
	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
/*
 * Module start: hand all UF buffers to firmware so it can begin
 * delivering unsolicited frames.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
/*
 * hal_uf_api
 */

/*
 * Register handler for all unsolicted recieve frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5127 * Free an unsolicited frame back to BFA.
5129 * @param[in] uf unsolicited frame to be freed
5131 * @return None
5133 void
5134 bfa_uf_free(struct bfa_uf_s *uf)
5136 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5137 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5143 * uf_pub BFA uf module public functions
5145 void
5146 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5148 bfa_trc(bfa, msg->mhdr.msg_id);
5150 switch (msg->mhdr.msg_id) {
5151 case BFI_UF_I2H_FRM_RCVD:
5152 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5153 break;
5155 default:
5156 bfa_trc(bfa, msg->mhdr.msg_id);
5157 bfa_assert(0);