tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/scsi/bfa/bfa_rport.c
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <bfa_svc.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_rport.h>
#include "bfa_intr_priv.h"
BFA_TRC_FILE(HAL, RPORT);
BFA_MODULE(rport);
#define bfa_rport_offline_cb(__rp) do {                                 \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_offline((__rp)->rport_drv);                \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                             __bfa_cb_rport_offline, (__rp));           \
        }                                                               \
} while (0)

#define bfa_rport_online_cb(__rp) do {                                  \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_online((__rp)->rport_drv);                 \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                             __bfa_cb_rport_online, (__rp));            \
        }                                                               \
} while (0)
/*
 * forward declarations
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
/**
 *  bfa_rport_sm BFA rport state machine
 */

enum bfa_rport_event {
        BFA_RPORT_SM_CREATE     = 1,    /*  rport create event          */
        BFA_RPORT_SM_DELETE     = 2,    /*  deleting an existing rport  */
        BFA_RPORT_SM_ONLINE     = 3,    /*  rport is online             */
        BFA_RPORT_SM_OFFLINE    = 4,    /*  rport is offline            */
        BFA_RPORT_SM_FWRSP      = 5,    /*  firmware response           */
        BFA_RPORT_SM_HWFAIL     = 6,    /*  IOC h/w failure             */
        BFA_RPORT_SM_QOS_SCN    = 7,    /*  QoS SCN from firmware       */
        BFA_RPORT_SM_SET_SPEED  = 8,    /*  Set Rport Speed             */
        BFA_RPORT_SM_QRESUME    = 9,    /*  space in requeue queue      */
};
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
                                enum bfa_rport_event event);
static void bfa_rport_sm_created(struct bfa_rport_s *rp,
                                 enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
                                  enum bfa_rport_event event);
static void bfa_rport_sm_online(struct bfa_rport_s *rp,
                                enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
                                  enum bfa_rport_event event);
static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
                                 enum bfa_rport_event event);
static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
                                  enum bfa_rport_event event);
static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
                                         enum bfa_rport_event event);
static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
                                        enum bfa_rport_event event);
static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
                                    enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
                                        enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
                                        enum bfa_rport_event event);
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
                                        enum bfa_rport_event event);
/**
 * Beginning state; only the create event is expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_CREATE:
                bfa_stats(rp, sm_un_cr);
                bfa_sm_set_state(rp, bfa_rport_sm_created);
                break;

        default:
                bfa_stats(rp, sm_un_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
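/**
 * Rport is created. Waiting for an online or delete request.
 */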
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_ONLINE:
                bfa_stats(rp, sm_cr_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_cr_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_cr_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_cr_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                bfa_stats(rp, sm_fwc_rsp);
                bfa_sm_set_state(rp, bfa_rport_sm_online);
                bfa_rport_online_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_fwc_del);
                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
                break;

        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_fwc_off);
                bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwc_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_fwc_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                bfa_rport_send_fwcreate(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_fwc_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_fwc_off);
                bfa_sm_set_state(rp, bfa_rport_sm_offline);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwc_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_reqq_wcancel(&rp->reqq_wait);
                break;

        default:
                bfa_stats(rp, sm_fwc_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        struct bfi_rport_qos_scn_s *qos_scn;

        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_on_off);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_on_del);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_on_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        case BFA_RPORT_SM_SET_SPEED:
                bfa_rport_send_fwspeed(rp);
                break;

        case BFA_RPORT_SM_QOS_SCN:
                qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
                rp->qos_attr = qos_scn->new_qos_attr;
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

                qos_scn->old_qos_attr.qos_flow_id =
                        bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
                qos_scn->new_qos_attr.qos_flow_id =
                        bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
                qos_scn->old_qos_attr.qos_priority =
                        bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
                qos_scn->new_qos_attr.qos_priority =
                        bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);

                if (qos_scn->old_qos_attr.qos_flow_id !=
                    qos_scn->new_qos_attr.qos_flow_id)
                        bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
                                                    qos_scn->old_qos_attr,
                                                    qos_scn->new_qos_attr);
                if (qos_scn->old_qos_attr.qos_priority !=
                    qos_scn->new_qos_attr.qos_priority)
                        bfa_cb_rport_qos_scn_prio(rp->rport_drv,
                                                  qos_scn->old_qos_attr,
                                                  qos_scn->new_qos_attr);
                break;

        default:
                bfa_stats(rp, sm_on_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                bfa_stats(rp, sm_fwd_rsp);
                bfa_sm_set_state(rp, bfa_rport_sm_offline);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
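/**
 * Firmware delete is pending but the request queue is full; wait for
 * queue resume before sending the delete request.
 */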
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_off_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                bfa_stats(rp, sm_off_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_off_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_off_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                bfa_sm_fault(rp->bfa, event);
        }
}
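/**
 * Rport delete is pending but the request queue is full; wait for
 * queue resume before sending the delete request.
 */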
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_free(rp);
                break;

        default:
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
                            enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                bfa_stats(rp, sm_delp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_delp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                bfa_stats(rp, sm_delp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
                             enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                bfa_stats(rp, sm_offp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_offp_del);
                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_offp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_offp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_iocd_off);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_iocd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                bfa_stats(rp, sm_iocd_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                break;

        default:
                bfa_stats(rp, sm_iocd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
/**
 *  bfa_rport_private BFA rport private functions
 */

static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_rport_s *rp = cbarg;

        if (complete)
                bfa_cb_rport_online(rp->rport_drv);
}

static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_rport_s *rp = cbarg;

        if (complete)
                bfa_cb_rport_offline(rp->rport_drv);
}

static void
bfa_rport_qresume(void *cbarg)
{
        struct bfa_rport_s *rp = cbarg;

        bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
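/**
 * Compute the kernel virtual memory needed by the rport module: one
 * bfa_rport_s per configured rport, with a floor of BFA_RPORT_MIN.
 */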
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                  u32 *dm_len)
{
        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
                cfg->fwcfg.num_rports = BFA_RPORT_MIN;

        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
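/**
 * Module attach: carve the rport array out of the kva meminfo block and
 * initialize each rport in the uninit state, placing all but tag 0 on
 * the free queue.
 */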
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rp;
        u16 i;

        INIT_LIST_HEAD(&mod->rp_free_q);
        INIT_LIST_HEAD(&mod->rp_active_q);

        rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
        mod->rps_list = rp;
        mod->num_rports = cfg->fwcfg.num_rports;

        bfa_assert(mod->num_rports
                   && !(mod->num_rports & (mod->num_rports - 1)));

        for (i = 0; i < mod->num_rports; i++, rp++) {
                bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
                rp->bfa = bfa;
                rp->rport_tag = i;
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);

                /*
                 * rport tag 0 is reserved (unused); keep it off the free queue
                 */
                if (i)
                        list_add_tail(&rp->qe, &mod->rp_free_q);

                bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
        }

        /*
         * consume memory claimed by the rport array
         */
        bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
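/**
 * Remaining module hooks; the rport module has nothing to do on detach,
 * start or stop.
 */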
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}

static void
bfa_rport_start(struct bfa_s *bfa)
{
}

static void
bfa_rport_stop(struct bfa_s *bfa)
{
}

static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rport;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &mod->rp_active_q) {
                rport = (struct bfa_rport_s *) qe;
                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
        }
}
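/**
 * Allocate an rport from the free queue and move it to the active queue;
 * returns NULL when the free queue is empty.
 */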
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
        struct bfa_rport_s *rport;

        bfa_q_deq(&mod->rp_free_q, &rport);
        if (rport)
                list_add_tail(&rport->qe, &mod->rp_active_q);

        return rport;
}

static void
bfa_rport_free(struct bfa_rport_s *rport)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

        bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
        list_del(&rport->qe);
        list_add_tail(&rport->qe, &mod->rp_free_q);
}
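/**
 * Build and post an rport create request to firmware. Returns BFA_FALSE
 * and waits on the request queue if no request element is available.
 */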
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
        struct bfi_rport_create_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
                    bfa_lpuid(rp->bfa));
        m->bfa_handle = rp->rport_tag;
        m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
        m->lp_tag = rp->rport_info.lp_tag;
        m->local_pid = rp->rport_info.local_pid;
        m->fc_class = rp->rport_info.fc_class;
        m->vf_en = rp->rport_info.vf_en;
        m->vf_id = rp->rport_info.vf_id;
        m->cisc = rp->rport_info.cisc;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
        return BFA_TRUE;
}
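/**
 * Build and post an rport delete request to firmware. Returns BFA_FALSE
 * and waits on the request queue if no request element is available.
 */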
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
        struct bfi_rport_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
                    bfa_lpuid(rp->bfa));
        m->fw_handle = rp->fw_handle;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
        return BFA_TRUE;
}
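/**
 * Post a set-speed request to firmware. Unlike create/delete, a full
 * request queue is not retried: the request is dropped and only the
 * speed is traced.
 */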
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
        struct bfa_rport_speed_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_trc(rp->bfa, rp->rport_info.speed);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
                    bfa_lpuid(rp->bfa));
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
        return BFA_TRUE;
}
/**
 *  bfa_rport_public
 */

/**
 * Rport interrupt processing.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        union bfi_rport_i2h_msg_u msg;
        struct bfa_rport_s *rp;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_RPORT_I2H_CREATE_RSP:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
                rp->fw_handle = msg.create_rsp->fw_handle;
                rp->qos_attr = msg.create_rsp->qos_attr;
                bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_DELETE_RSP:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
                bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_QOS_SCN:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
                rp->event_arg.fw_msg = msg.qos_scn_evt;
                bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                bfa_assert(0);
        }
}
/**
 *  bfa_rport_api
 */

struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
        struct bfa_rport_s *rp;

        rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

        if (rp == NULL)
                return NULL;

        rp->bfa = bfa;
        rp->rport_drv = rport_drv;
        bfa_rport_clear_stats(rp);

        bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
        bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

        return rp;
}

void
bfa_rport_delete(struct bfa_rport_s *rport)
{
        bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}

void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
        bfa_assert(rport_info->max_frmsz != 0);

        /*
         * Some JBODs are seen to be not setting PDU size correctly in PLOGI
         * responses. Default to minimum size.
         */
        if (rport_info->max_frmsz == 0) {
                bfa_trc(rport->bfa, rport->rport_tag);
                rport_info->max_frmsz = FC_MIN_PDUSZ;
        }

        bfa_os_assign(rport->rport_info, *rport_info);
        bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}

void
bfa_rport_offline(struct bfa_rport_s *rport)
{
        bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}

void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
{
        bfa_assert(speed != 0);
        bfa_assert(speed != BFA_PPORT_SPEED_AUTO);

        rport->rport_info.speed = speed;
        bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}

void
bfa_rport_get_stats(struct bfa_rport_s *rport,
                    struct bfa_rport_hal_stats_s *stats)
{
        *stats = rport->stats;
}

void
bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
                       struct bfa_rport_qos_attr_s *qos_attr)
{
        qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
        qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
}

void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
        bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}