/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);
BFA_MODULE(fcpim);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)	do {				\
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	WARN_ON(!list_empty(&(__itnim)->io_q));				\
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
} while (0)
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
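
/*
 * Note on the *_cb macros above: when an FCS instance is attached
 * (bfa->fcs is set), the itnim callbacks are invoked synchronously;
 * otherwise they are deferred through the callback queue (bfa_cb_queue())
 * and run later via the __bfa_cb_itnim_* wrappers defined below.
 */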
/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};

/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)
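
/*
 * bfa_tskim_notify_comp() ties a task-management completion back into the
 * owning itnim's cleanup wait counter: when the tskim was counted by
 * bfa_itnim_cleanup() below, bfa_itnim_tskdone() decrements itnim->wc.
 */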

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);

/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);

/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	bfa_itnim_meminfo(cfg, km_len, dm_len);

	/*
	 * IO memory
	 */
	if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	*km_len += cfg->fwcfg.num_ioim_reqs *
		(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}

static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}

static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->path_tov / 1000;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}
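
/*
 * State-machine convention used by everything below: each state is a
 * function taking (object, event).  The helpers come from the driver's
 * common state-machine support and look roughly like:
 *
 *	#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
 *	#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
 *
 * i.e. setting a state stores a function pointer and sending an event
 * dispatches to the current state function.
 */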

/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
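
/*
 * bfa_itnim_cleanup() uses the driver's wait-counter (bfa_wc) pattern:
 * bfa_wc_init() registers bfa_itnim_cleanp_comp() as the resume callback,
 * each outstanding IO/TM bumps the count with bfa_wc_up(), and the
 * completion paths (bfa_itnim_iodone()/bfa_itnim_tskdone()) drop it with
 * bfa_wc_down().  The final bfa_wc_wait() releases the initial reference,
 * so the callback fires exactly once, after all cleanups complete.
 */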

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
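
/*
 * Request-queue flow control, used by both send routines above and below:
 * bfa_reqq_next() returns NULL when the request CQ is full, in which case
 * the sender parks on reqq_wait and returns BFA_FALSE.  The caller then
 * enters the corresponding *_qfull state and retries when the queue drains
 * and bfa_itnim_qresume() delivers BFA_ITNIM_SM_QRESUME.
 */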

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
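
/*
 * IO requests that arrive while bfa_itnim_hold_io() is true are parked on
 * itnim->pending_q (see bfa_ioim_sm_uninit() below) and are either
 * restarted when the itnim comes back online or failed back when the
 * path TOV timer fires.
 */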

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
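
/*
 * Residue sign convention passed to bfa_cb_ioim_done() above: positive
 * for FCP_RESID_UNDER (device transferred less than requested), negative
 * for FCP_RESID_OVER.
 */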

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
/*
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32	i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
			    sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through: bidirectional IO uses the generic opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

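/*
 * SG chain layout produced above, for reference: the first scatterlist
 * entry is carried inline in the request message (DATA_CPL if more entries
 * follow, DATA_LAST otherwise), and the message's second inline element is
 * a PGDLEN element pointing at the first chained SG page. Within the chained
 * pages, the IO's final entry is flagged DATA_LAST and is followed by a
 * PGDLEN sentinel holding that page's cumulative byte count (pgcumsz), while
 * a page that fills up mid-IO ends in a LINK element carrying pgcumsz and
 * the physical address of the next page.
 */
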
/*
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}

/*
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

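/*
 * Note: abort_tag is bumped on every abort request sent, and the completion
 * path (BFI_IOIM_STS_HOST_ABORTED in bfa_ioim_isr() below) drops any
 * firmware response whose abort_tag does not match the latest one, so
 * responses to superseded abort requests are silently ignored.
 */
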
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}

static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}

static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	     (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}

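/*
 * The states rejected above are those where an abort would be redundant:
 * the IO has not been started (uninit and not on the pending queue), an
 * abort is already in progress (abort/abort_qfull), only the host
 * completion callback remains outstanding (hcb/hcb_free), or the IO is
 * merely waiting for its firmware resources to be freed (resfree).
 */
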
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If the path tov timer expired, fail the IO back with PATHTOV
	 * status - such IO requests are not normally retried by the IO
	 * stack.
	 *
	 * Otherwise the device came back online; fail the IO with a normal
	 * failed status so that the IO stack retries it.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to the fcpim global queue since the itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16	i;
	u8	*snsinfo;
	u32	snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}

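/*
 * Resulting carve-up of the pre-allocated module memory, per the claims
 * above (with num_ioim_reqs == N):
 *
 *   KVA:  [ N x struct bfa_ioim_s ][ N x struct bfa_ioim_sp_s ]
 *   DMA:  [ N x BFI_IOIM_SNSLEN sense buffers ]
 *
 * The DMA base is programmed into the IOC via bfa_iocfc_set_snsbase(), so
 * firmware can presumably DMA sense data directly into the per-IO buffer
 * (buffer i corresponds to the ioim with iotag i).
 */
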
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through: a timed out IO is completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		WARN_ON(!rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		WARN_ON(rsp->reuse_io_tag != 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		WARN_ON(1);
	}

	bfa_sm_send_event(ioim, evt);
}

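/*
 * Summary of the firmware status to state machine event mapping above.
 * Where reuse_io_tag steers the choice, a set flag selects the COMP-style
 * event and a clear flag the DONE-style event (the flag presumably tells
 * the host whether firmware still holds the IO tag):
 *
 *   STS_OK / TIMEDOUT / ABORTED -> SM_COMP or SM_DONE
 *   STS_PROTO_ERR               -> SM_COMP
 *   STS_SQER_NEEDED             -> SM_SQRETRY (sequence level retry)
 *   STS_RES_FREE                -> SM_FREE
 *   STS_HOST_ABORTED            -> SM_ABORT_COMP or SM_ABORT_DONE
 *   STS_UTAG                    -> SM_COMP_UTAG
 */
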
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);

	bfa_ioim_cb_profile_comp(fcpim, ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}

/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}

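/*
 * A minimal usage sketch from the driver shim's queuecommand path
 * (hypothetical caller, shown only to tie alloc/start together; the real
 * wiring lives in bfad_im.c):
 *
 *	ioim = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
 *				itnim, scsi_sg_count(cmnd));
 *	if (!ioim)
 *		return SCSI_MLQUEUE_HOST_BUSY;	// out of IO tags, retry later
 *	bfa_ioim_start(ioim);
 */
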
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	ioim->iotag &= BFA_IOIM_IOTAG_MASK;
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}

void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

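/*
 * Note on the reqq selection above: when IO redirection is enabled the
 * expression evaluates to BFA_FALSE, i.e. request queue 0; otherwise the
 * itnim's configured request queue is used. Reusing a boolean constant as a
 * queue index is taken verbatim from the source and appears intentional.
 */
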
/*
 * Driver I/O abort request.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}

/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					&tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is active, awaiting room in the request CQ
 * to send a cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
			BFI_TSKIM_STS_FAILED);
}

static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return !memcmp(&tskim->lun, &lun, sizeof(lun));

	default:
		WARN_ON(1);
	}

	return BFA_FALSE;
}

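/*
 * Scope rule in short: a target reset covers every IO on the I-T nexus, so
 * it matches unconditionally; the LUN-scoped TM commands match only IOs
 * whose 8-byte SCSI LUN equals the one carried in the TM request.
 */
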
/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Fail back any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/*
 * Clean up the IO requests gathered under this task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

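/*
 * The wait counter (wc) implements a simple join: bfa_wc_init() primes the
 * counter with one reference and records bfa_tskim_cleanp_comp() as the
 * completion callback, bfa_wc_up() takes a reference per gathered IO, each
 * IO's cleanup drops one via bfa_tskim_iodone() (see below), and
 * bfa_wc_wait() drops the initial reference - so the callback fires only
 * after every IO in TM scope has completed its cleanup.
 */
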
/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Send abort request to cleanup an active TM to firmware.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
				tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}

void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}

struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}

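/*
 * A minimal usage sketch (hypothetical caller; the LUN reset handler in
 * bfad_im.c follows roughly this shape):
 *
 *	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 *	if (!tskim)
 *		return ...;		// no free TM tags
 *	int_to_scsilun(cmnd->device->lun, &scsilun);
 *	bfa_tskim_start(tskim, itnim, scsilun, FCP_TM_LUN_RESET, tm_secs);
 *					// tm_secs: caller-chosen timeout
 */
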