/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * Handles operations such as session offload/upload etc, and manages
 * session resources such as connection id and qp resources.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */
15 static void bnx2fc_upld_timer(unsigned long data
);
16 static void bnx2fc_ofld_timer(unsigned long data
);
17 static int bnx2fc_init_tgt(struct bnx2fc_rport
*tgt
,
18 struct fcoe_port
*port
,
19 struct fc_rport_priv
*rdata
);
20 static u32
bnx2fc_alloc_conn_id(struct bnx2fc_hba
*hba
,
21 struct bnx2fc_rport
*tgt
);
22 static int bnx2fc_alloc_session_resc(struct bnx2fc_hba
*hba
,
23 struct bnx2fc_rport
*tgt
);
24 static void bnx2fc_free_session_resc(struct bnx2fc_hba
*hba
,
25 struct bnx2fc_rport
*tgt
);
26 static void bnx2fc_free_conn_id(struct bnx2fc_hba
*hba
, u32 conn_id
);
28 static void bnx2fc_upld_timer(unsigned long data
)
31 struct bnx2fc_rport
*tgt
= (struct bnx2fc_rport
*)data
;
33 BNX2FC_TGT_DBG(tgt
, "upld_timer - Upload compl not received!!\n");
34 /* fake upload completion */
35 clear_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
);
36 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL
, &tgt
->flags
);
37 wake_up_interruptible(&tgt
->upld_wait
);
40 static void bnx2fc_ofld_timer(unsigned long data
)
43 struct bnx2fc_rport
*tgt
= (struct bnx2fc_rport
*)data
;
45 BNX2FC_TGT_DBG(tgt
, "entered bnx2fc_ofld_timer\n");
46 /* NOTE: This function should never be called, as
47 * offload should never timeout
50 * If the timer has expired, this session is dead
51 * Clear offloaded flag and logout of this device.
52 * Since OFFLOADED flag is cleared, this case
53 * will be considered as offload error and the
54 * port will be logged off, and conn_id, session
55 * resources are freed up in bnx2fc_offload_session
57 clear_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
);
58 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL
, &tgt
->flags
);
59 wake_up_interruptible(&tgt
->ofld_wait
);
62 static void bnx2fc_offload_session(struct fcoe_port
*port
,
63 struct bnx2fc_rport
*tgt
,
64 struct fc_rport_priv
*rdata
)
66 struct fc_lport
*lport
= rdata
->local_port
;
67 struct fc_rport
*rport
= rdata
->rport
;
68 struct bnx2fc_hba
*hba
= port
->priv
;
72 /* Initialize bnx2fc_rport */
73 /* NOTE: tgt is already bzero'd */
74 rval
= bnx2fc_init_tgt(tgt
, port
, rdata
);
76 printk(KERN_ERR PFX
"Failed to allocate conn id for "
77 "port_id (%6x)\n", rport
->port_id
);
81 /* Allocate session resources */
82 rval
= bnx2fc_alloc_session_resc(hba
, tgt
);
84 printk(KERN_ERR PFX
"Failed to allocate resources\n");
89 * Initialize FCoE session offload process.
90 * Upon completion of offload process add
91 * rport to list of rports
94 clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL
, &tgt
->flags
);
95 rval
= bnx2fc_send_session_ofld_req(port
, tgt
);
97 printk(KERN_ERR PFX
"ofld_req failed\n");
102 * wait for the session is offloaded and enabled. 3 Secs
103 * should be ample time for this process to complete.
105 setup_timer(&tgt
->ofld_timer
, bnx2fc_ofld_timer
, (unsigned long)tgt
);
106 mod_timer(&tgt
->ofld_timer
, jiffies
+ BNX2FC_FW_TIMEOUT
);
108 wait_event_interruptible(tgt
->ofld_wait
,
110 BNX2FC_FLAG_OFLD_REQ_CMPL
,
112 if (signal_pending(current
))
113 flush_signals(current
);
115 del_timer_sync(&tgt
->ofld_timer
);
117 if (!(test_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
))) {
118 if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE
,
120 BNX2FC_TGT_DBG(tgt
, "ctx_alloc_failure, "
121 "retry ofld..%d\n", i
++);
122 msleep_interruptible(1000);
131 if (bnx2fc_map_doorbell(tgt
)) {
132 printk(KERN_ERR PFX
"map doorbell failed - no mem\n");
133 /* upload will take care of cleaning up sess resc */
134 lport
->tt
.rport_logoff(rdata
);
139 /* couldn't offload the session. log off from this rport */
140 BNX2FC_TGT_DBG(tgt
, "bnx2fc_offload_session - offload error\n");
141 lport
->tt
.rport_logoff(rdata
);
142 /* Free session resources */
143 bnx2fc_free_session_resc(hba
, tgt
);
144 if (tgt
->fcoe_conn_id
!= -1)
145 bnx2fc_free_conn_id(hba
, tgt
->fcoe_conn_id
);
148 void bnx2fc_flush_active_ios(struct bnx2fc_rport
*tgt
)
150 struct bnx2fc_cmd
*io_req
;
151 struct list_head
*list
;
152 struct list_head
*tmp
;
155 BNX2FC_TGT_DBG(tgt
, "Entered flush_active_ios - %d\n",
156 tgt
->num_active_ios
.counter
);
158 spin_lock_bh(&tgt
->tgt_lock
);
159 tgt
->flush_in_prog
= 1;
161 list_for_each_safe(list
, tmp
, &tgt
->active_cmd_queue
) {
163 io_req
= (struct bnx2fc_cmd
*)list
;
164 list_del_init(&io_req
->link
);
165 io_req
->on_active_queue
= 0;
166 BNX2FC_IO_DBG(io_req
, "cmd_queue cleanup\n");
168 if (cancel_delayed_work(&io_req
->timeout_work
)) {
169 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT
,
170 &io_req
->req_flags
)) {
171 /* Handle eh_abort timeout */
172 BNX2FC_IO_DBG(io_req
, "eh_abort for IO "
174 complete(&io_req
->tm_done
);
176 kref_put(&io_req
->refcount
,
177 bnx2fc_cmd_release
); /* drop timer hold */
180 set_bit(BNX2FC_FLAG_IO_COMPL
, &io_req
->req_flags
);
181 set_bit(BNX2FC_FLAG_IO_CLEANUP
, &io_req
->req_flags
);
182 rc
= bnx2fc_initiate_cleanup(io_req
);
186 list_for_each_safe(list
, tmp
, &tgt
->els_queue
) {
188 io_req
= (struct bnx2fc_cmd
*)list
;
189 list_del_init(&io_req
->link
);
190 io_req
->on_active_queue
= 0;
192 BNX2FC_IO_DBG(io_req
, "els_queue cleanup\n");
194 if (cancel_delayed_work(&io_req
->timeout_work
))
195 kref_put(&io_req
->refcount
,
196 bnx2fc_cmd_release
); /* drop timer hold */
198 if ((io_req
->cb_func
) && (io_req
->cb_arg
)) {
199 io_req
->cb_func(io_req
->cb_arg
);
200 io_req
->cb_arg
= NULL
;
203 rc
= bnx2fc_initiate_cleanup(io_req
);
207 list_for_each_safe(list
, tmp
, &tgt
->io_retire_queue
) {
209 io_req
= (struct bnx2fc_cmd
*)list
;
210 list_del_init(&io_req
->link
);
212 BNX2FC_IO_DBG(io_req
, "retire_queue flush\n");
214 if (cancel_delayed_work(&io_req
->timeout_work
))
215 kref_put(&io_req
->refcount
, bnx2fc_cmd_release
);
217 clear_bit(BNX2FC_FLAG_ISSUE_RRQ
, &io_req
->req_flags
);
220 BNX2FC_TGT_DBG(tgt
, "IOs flushed = %d\n", i
);
222 spin_unlock_bh(&tgt
->tgt_lock
);
223 /* wait for active_ios to go to 0 */
224 while ((tgt
->num_active_ios
.counter
!= 0) && (i
++ < BNX2FC_WAIT_CNT
))
226 if (tgt
->num_active_ios
.counter
!= 0)
227 printk(KERN_ERR PFX
"CLEANUP on port 0x%x:"
228 " active_ios = %d\n",
229 tgt
->rdata
->ids
.port_id
, tgt
->num_active_ios
.counter
);
230 spin_lock_bh(&tgt
->tgt_lock
);
231 tgt
->flush_in_prog
= 0;
232 spin_unlock_bh(&tgt
->tgt_lock
);
235 static void bnx2fc_upload_session(struct fcoe_port
*port
,
236 struct bnx2fc_rport
*tgt
)
238 struct bnx2fc_hba
*hba
= port
->priv
;
240 BNX2FC_TGT_DBG(tgt
, "upload_session: active_ios = %d\n",
241 tgt
->num_active_ios
.counter
);
244 * Called with hba->hba_mutex held.
245 * This is a blocking call
247 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL
, &tgt
->flags
);
248 bnx2fc_send_session_disable_req(port
, tgt
);
251 * wait for upload to complete. 3 Secs
252 * should be sufficient time for this process to complete.
254 setup_timer(&tgt
->upld_timer
, bnx2fc_upld_timer
, (unsigned long)tgt
);
255 mod_timer(&tgt
->upld_timer
, jiffies
+ BNX2FC_FW_TIMEOUT
);
257 BNX2FC_TGT_DBG(tgt
, "waiting for disable compl\n");
258 wait_event_interruptible(tgt
->upld_wait
,
260 BNX2FC_FLAG_UPLD_REQ_COMPL
,
263 if (signal_pending(current
))
264 flush_signals(current
);
266 del_timer_sync(&tgt
->upld_timer
);
269 * traverse thru the active_q and tmf_q and cleanup
272 BNX2FC_TGT_DBG(tgt
, "flush/upload - disable wait flags = 0x%lx\n",
274 bnx2fc_flush_active_ios(tgt
);
276 /* Issue destroy KWQE */
277 if (test_bit(BNX2FC_FLAG_DISABLED
, &tgt
->flags
)) {
278 BNX2FC_TGT_DBG(tgt
, "send destroy req\n");
279 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL
, &tgt
->flags
);
280 bnx2fc_send_session_destroy_req(hba
, tgt
);
282 /* wait for destroy to complete */
283 setup_timer(&tgt
->upld_timer
,
284 bnx2fc_upld_timer
, (unsigned long)tgt
);
285 mod_timer(&tgt
->upld_timer
, jiffies
+ BNX2FC_FW_TIMEOUT
);
287 wait_event_interruptible(tgt
->upld_wait
,
289 BNX2FC_FLAG_UPLD_REQ_COMPL
,
292 if (!(test_bit(BNX2FC_FLAG_DESTROYED
, &tgt
->flags
)))
293 printk(KERN_ERR PFX
"ERROR!! destroy timed out\n");
295 BNX2FC_TGT_DBG(tgt
, "destroy wait complete flags = 0x%lx\n",
297 if (signal_pending(current
))
298 flush_signals(current
);
300 del_timer_sync(&tgt
->upld_timer
);
303 printk(KERN_ERR PFX
"ERROR!! DISABLE req timed out, destroy"
304 " not sent to FW\n");
306 /* Free session resources */
307 spin_lock_bh(&tgt
->cq_lock
);
308 bnx2fc_free_session_resc(hba
, tgt
);
309 bnx2fc_free_conn_id(hba
, tgt
->fcoe_conn_id
);
310 spin_unlock_bh(&tgt
->cq_lock
);
313 static int bnx2fc_init_tgt(struct bnx2fc_rport
*tgt
,
314 struct fcoe_port
*port
,
315 struct fc_rport_priv
*rdata
)
318 struct fc_rport
*rport
= rdata
->rport
;
319 struct bnx2fc_hba
*hba
= port
->priv
;
325 if (hba
->num_ofld_sess
>= BNX2FC_NUM_MAX_SESS
) {
326 BNX2FC_TGT_DBG(tgt
, "exceeded max sessions. logoff this tgt\n");
327 tgt
->fcoe_conn_id
= -1;
331 tgt
->fcoe_conn_id
= bnx2fc_alloc_conn_id(hba
, tgt
);
332 if (tgt
->fcoe_conn_id
== -1)
335 BNX2FC_TGT_DBG(tgt
, "init_tgt - conn_id = 0x%x\n", tgt
->fcoe_conn_id
);
337 tgt
->max_sqes
= BNX2FC_SQ_WQES_MAX
;
338 tgt
->max_rqes
= BNX2FC_RQ_WQES_MAX
;
339 tgt
->max_cqes
= BNX2FC_CQ_WQES_MAX
;
341 /* Initialize the toggle bit */
342 tgt
->sq_curr_toggle_bit
= 1;
343 tgt
->cq_curr_toggle_bit
= 1;
344 tgt
->sq_prod_idx
= 0;
345 tgt
->cq_cons_idx
= 0;
346 tgt
->rq_prod_idx
= 0x8000;
347 tgt
->rq_cons_idx
= 0;
348 atomic_set(&tgt
->num_active_ios
, 0);
350 tgt
->work_time_slice
= 2;
352 spin_lock_init(&tgt
->tgt_lock
);
353 spin_lock_init(&tgt
->cq_lock
);
355 /* Initialize active_cmd_queue list */
356 INIT_LIST_HEAD(&tgt
->active_cmd_queue
);
358 /* Initialize IO retire queue */
359 INIT_LIST_HEAD(&tgt
->io_retire_queue
);
361 INIT_LIST_HEAD(&tgt
->els_queue
);
363 /* Initialize active_tm_queue list */
364 INIT_LIST_HEAD(&tgt
->active_tm_queue
);
366 init_waitqueue_head(&tgt
->ofld_wait
);
367 init_waitqueue_head(&tgt
->upld_wait
);
373 * This event_callback is called after successful completion of libfc
374 * initiated target login. bnx2fc can proceed with initiating the session
377 void bnx2fc_rport_event_handler(struct fc_lport
*lport
,
378 struct fc_rport_priv
*rdata
,
379 enum fc_rport_event event
)
381 struct fcoe_port
*port
= lport_priv(lport
);
382 struct bnx2fc_hba
*hba
= port
->priv
;
383 struct fc_rport
*rport
= rdata
->rport
;
384 struct fc_rport_libfc_priv
*rp
;
385 struct bnx2fc_rport
*tgt
;
388 BNX2FC_HBA_DBG(lport
, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
389 event
, rdata
->ids
.port_id
);
393 printk(KERN_ALERT PFX
"rport is NULL: ERROR!\n");
398 if (rport
->port_id
== FC_FID_DIR_SERV
) {
400 * bnx2fc_rport structure doesnt exist for
402 * We should not come here, as lport will
403 * take care of fabric login
405 printk(KERN_ALERT PFX
"%x - rport_event_handler ERROR\n",
410 if (rdata
->spp_type
!= FC_TYPE_FCP
) {
411 BNX2FC_HBA_DBG(lport
, "not FCP type target."
412 " not offloading\n");
415 if (!(rdata
->ids
.roles
& FC_RPORT_ROLE_FCP_TARGET
)) {
416 BNX2FC_HBA_DBG(lport
, "not FCP_TARGET"
417 " not offloading\n");
422 * Offlaod process is protected with hba mutex.
423 * Use the same mutex_lock for upload process too
425 mutex_lock(&hba
->hba_mutex
);
426 tgt
= (struct bnx2fc_rport
*)&rp
[1];
428 /* This can happen when ADISC finds the same target */
429 if (test_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
)) {
430 BNX2FC_TGT_DBG(tgt
, "already offloaded\n");
431 mutex_unlock(&hba
->hba_mutex
);
436 * Offload the session. This is a blocking call, and will
437 * wait until the session is offloaded.
439 bnx2fc_offload_session(port
, tgt
, rdata
);
441 BNX2FC_TGT_DBG(tgt
, "OFFLOAD num_ofld_sess = %d\n",
444 if (test_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
)) {
446 * Session is offloaded and enabled. Map
447 * doorbell register for this target
449 BNX2FC_TGT_DBG(tgt
, "sess offloaded\n");
450 /* This counter is protected with hba mutex */
451 hba
->num_ofld_sess
++;
453 set_bit(BNX2FC_FLAG_SESSION_READY
, &tgt
->flags
);
456 * Offload or enable would have failed.
457 * In offload/enable completion path, the
458 * rport would have already been removed
460 BNX2FC_TGT_DBG(tgt
, "Port is being logged off as "
461 "offloaded flag not set\n");
463 mutex_unlock(&hba
->hba_mutex
);
466 case RPORT_EV_FAILED
:
468 port_id
= rdata
->ids
.port_id
;
469 if (port_id
== FC_FID_DIR_SERV
)
473 printk(KERN_ALERT PFX
"%x - rport not created Yet!!\n",
478 mutex_lock(&hba
->hba_mutex
);
480 * Perform session upload. Note that rdata->peers is already
481 * removed from disc->rports list before we get this event.
483 tgt
= (struct bnx2fc_rport
*)&rp
[1];
485 if (!(test_bit(BNX2FC_FLAG_OFFLOADED
, &tgt
->flags
))) {
486 mutex_unlock(&hba
->hba_mutex
);
489 clear_bit(BNX2FC_FLAG_SESSION_READY
, &tgt
->flags
);
491 bnx2fc_upload_session(port
, tgt
);
492 hba
->num_ofld_sess
--;
493 BNX2FC_TGT_DBG(tgt
, "UPLOAD num_ofld_sess = %d\n",
496 * Try to wake up the linkdown wait thread. If num_ofld_sess
497 * is 0, the waiting therad wakes up
499 if ((hba
->wait_for_link_down
) &&
500 (hba
->num_ofld_sess
== 0)) {
501 wake_up_interruptible(&hba
->shutdown_wait
);
503 if (test_bit(BNX2FC_FLAG_EXPL_LOGO
, &tgt
->flags
)) {
504 printk(KERN_ERR PFX
"Relogin to the tgt\n");
505 mutex_lock(&lport
->disc
.disc_mutex
);
506 lport
->tt
.rport_login(rdata
);
507 mutex_unlock(&lport
->disc
.disc_mutex
);
509 mutex_unlock(&hba
->hba_mutex
);
519 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
521 * @port: fcoe_port struct to lookup the target port on
522 * @port_id: The remote port ID to look up
524 struct bnx2fc_rport
*bnx2fc_tgt_lookup(struct fcoe_port
*port
,
527 struct bnx2fc_hba
*hba
= port
->priv
;
528 struct bnx2fc_rport
*tgt
;
529 struct fc_rport_priv
*rdata
;
532 for (i
= 0; i
< BNX2FC_NUM_MAX_SESS
; i
++) {
533 tgt
= hba
->tgt_ofld_list
[i
];
534 if ((tgt
) && (tgt
->port
== port
)) {
536 if (rdata
->ids
.port_id
== port_id
) {
537 if (rdata
->rp_state
!= RPORT_ST_DELETE
) {
538 BNX2FC_TGT_DBG(tgt
, "rport "
542 printk(KERN_ERR PFX
"rport 0x%x "
543 "is in DELETED state\n",
555 * bnx2fc_alloc_conn_id - allocates FCOE Connection id
557 * @hba: pointer to adapter structure
558 * @tgt: pointer to bnx2fc_rport structure
560 static u32
bnx2fc_alloc_conn_id(struct bnx2fc_hba
*hba
,
561 struct bnx2fc_rport
*tgt
)
565 /* called with hba mutex held */
568 * tgt_ofld_list access is synchronized using
569 * both hba mutex and hba lock. Atleast hba mutex or
570 * hba lock needs to be held for read access.
573 spin_lock_bh(&hba
->hba_lock
);
574 next
= hba
->next_conn_id
;
575 conn_id
= hba
->next_conn_id
++;
576 if (hba
->next_conn_id
== BNX2FC_NUM_MAX_SESS
)
577 hba
->next_conn_id
= 0;
579 while (hba
->tgt_ofld_list
[conn_id
] != NULL
) {
581 if (conn_id
== BNX2FC_NUM_MAX_SESS
)
584 if (conn_id
== next
) {
585 /* No free conn_ids are available */
586 spin_unlock_bh(&hba
->hba_lock
);
590 hba
->tgt_ofld_list
[conn_id
] = tgt
;
591 tgt
->fcoe_conn_id
= conn_id
;
592 spin_unlock_bh(&hba
->hba_lock
);
596 static void bnx2fc_free_conn_id(struct bnx2fc_hba
*hba
, u32 conn_id
)
598 /* called with hba mutex held */
599 spin_lock_bh(&hba
->hba_lock
);
600 hba
->tgt_ofld_list
[conn_id
] = NULL
;
601 hba
->next_conn_id
= conn_id
;
602 spin_unlock_bh(&hba
->hba_lock
);
606 *bnx2fc_alloc_session_resc - Allocate qp resources for the session
609 static int bnx2fc_alloc_session_resc(struct bnx2fc_hba
*hba
,
610 struct bnx2fc_rport
*tgt
)
616 /* Allocate and map SQ */
617 tgt
->sq_mem_size
= tgt
->max_sqes
* BNX2FC_SQ_WQE_SIZE
;
618 tgt
->sq_mem_size
= (tgt
->sq_mem_size
+ (PAGE_SIZE
- 1)) & PAGE_MASK
;
620 tgt
->sq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->sq_mem_size
,
621 &tgt
->sq_dma
, GFP_KERNEL
);
623 printk(KERN_ALERT PFX
"unable to allocate SQ memory %d\n",
625 goto mem_alloc_failure
;
627 memset(tgt
->sq
, 0, tgt
->sq_mem_size
);
629 /* Allocate and map CQ */
630 tgt
->cq_mem_size
= tgt
->max_cqes
* BNX2FC_CQ_WQE_SIZE
;
631 tgt
->cq_mem_size
= (tgt
->cq_mem_size
+ (PAGE_SIZE
- 1)) & PAGE_MASK
;
633 tgt
->cq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->cq_mem_size
,
634 &tgt
->cq_dma
, GFP_KERNEL
);
636 printk(KERN_ALERT PFX
"unable to allocate CQ memory %d\n",
638 goto mem_alloc_failure
;
640 memset(tgt
->cq
, 0, tgt
->cq_mem_size
);
642 /* Allocate and map RQ and RQ PBL */
643 tgt
->rq_mem_size
= tgt
->max_rqes
* BNX2FC_RQ_WQE_SIZE
;
644 tgt
->rq_mem_size
= (tgt
->rq_mem_size
+ (PAGE_SIZE
- 1)) & PAGE_MASK
;
646 tgt
->rq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->rq_mem_size
,
647 &tgt
->rq_dma
, GFP_KERNEL
);
649 printk(KERN_ALERT PFX
"unable to allocate RQ memory %d\n",
651 goto mem_alloc_failure
;
653 memset(tgt
->rq
, 0, tgt
->rq_mem_size
);
655 tgt
->rq_pbl_size
= (tgt
->rq_mem_size
/ PAGE_SIZE
) * sizeof(void *);
656 tgt
->rq_pbl_size
= (tgt
->rq_pbl_size
+ (PAGE_SIZE
- 1)) & PAGE_MASK
;
658 tgt
->rq_pbl
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->rq_pbl_size
,
659 &tgt
->rq_pbl_dma
, GFP_KERNEL
);
661 printk(KERN_ALERT PFX
"unable to allocate RQ PBL %d\n",
663 goto mem_alloc_failure
;
666 memset(tgt
->rq_pbl
, 0, tgt
->rq_pbl_size
);
667 num_pages
= tgt
->rq_mem_size
/ PAGE_SIZE
;
669 pbl
= (u32
*)tgt
->rq_pbl
;
671 while (num_pages
--) {
674 *pbl
= (u32
)((u64
)page
>> 32);
679 /* Allocate and map XFERQ */
680 tgt
->xferq_mem_size
= tgt
->max_sqes
* BNX2FC_XFERQ_WQE_SIZE
;
681 tgt
->xferq_mem_size
= (tgt
->xferq_mem_size
+ (PAGE_SIZE
- 1)) &
684 tgt
->xferq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->xferq_mem_size
,
685 &tgt
->xferq_dma
, GFP_KERNEL
);
687 printk(KERN_ALERT PFX
"unable to allocate XFERQ %d\n",
688 tgt
->xferq_mem_size
);
689 goto mem_alloc_failure
;
691 memset(tgt
->xferq
, 0, tgt
->xferq_mem_size
);
693 /* Allocate and map CONFQ & CONFQ PBL */
694 tgt
->confq_mem_size
= tgt
->max_sqes
* BNX2FC_CONFQ_WQE_SIZE
;
695 tgt
->confq_mem_size
= (tgt
->confq_mem_size
+ (PAGE_SIZE
- 1)) &
698 tgt
->confq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->confq_mem_size
,
699 &tgt
->confq_dma
, GFP_KERNEL
);
701 printk(KERN_ALERT PFX
"unable to allocate CONFQ %d\n",
702 tgt
->confq_mem_size
);
703 goto mem_alloc_failure
;
705 memset(tgt
->confq
, 0, tgt
->confq_mem_size
);
707 tgt
->confq_pbl_size
=
708 (tgt
->confq_mem_size
/ PAGE_SIZE
) * sizeof(void *);
709 tgt
->confq_pbl_size
=
710 (tgt
->confq_pbl_size
+ (PAGE_SIZE
- 1)) & PAGE_MASK
;
712 tgt
->confq_pbl
= dma_alloc_coherent(&hba
->pcidev
->dev
,
714 &tgt
->confq_pbl_dma
, GFP_KERNEL
);
715 if (!tgt
->confq_pbl
) {
716 printk(KERN_ALERT PFX
"unable to allocate CONFQ PBL %d\n",
717 tgt
->confq_pbl_size
);
718 goto mem_alloc_failure
;
721 memset(tgt
->confq_pbl
, 0, tgt
->confq_pbl_size
);
722 num_pages
= tgt
->confq_mem_size
/ PAGE_SIZE
;
723 page
= tgt
->confq_dma
;
724 pbl
= (u32
*)tgt
->confq_pbl
;
726 while (num_pages
--) {
729 *pbl
= (u32
)((u64
)page
>> 32);
734 /* Allocate and map ConnDB */
735 tgt
->conn_db_mem_size
= sizeof(struct fcoe_conn_db
);
737 tgt
->conn_db
= dma_alloc_coherent(&hba
->pcidev
->dev
,
738 tgt
->conn_db_mem_size
,
739 &tgt
->conn_db_dma
, GFP_KERNEL
);
741 printk(KERN_ALERT PFX
"unable to allocate conn_db %d\n",
742 tgt
->conn_db_mem_size
);
743 goto mem_alloc_failure
;
745 memset(tgt
->conn_db
, 0, tgt
->conn_db_mem_size
);
748 /* Allocate and map LCQ */
749 tgt
->lcq_mem_size
= (tgt
->max_sqes
+ 8) * BNX2FC_SQ_WQE_SIZE
;
750 tgt
->lcq_mem_size
= (tgt
->lcq_mem_size
+ (PAGE_SIZE
- 1)) &
753 tgt
->lcq
= dma_alloc_coherent(&hba
->pcidev
->dev
, tgt
->lcq_mem_size
,
754 &tgt
->lcq_dma
, GFP_KERNEL
);
757 printk(KERN_ALERT PFX
"unable to allocate lcq %d\n",
759 goto mem_alloc_failure
;
761 memset(tgt
->lcq
, 0, tgt
->lcq_mem_size
);
764 tgt
->conn_db
->cq_arm
.lo
= -1;
765 tgt
->conn_db
->rq_prod
= 0x8000;
770 bnx2fc_free_session_resc(hba
, tgt
);
771 bnx2fc_free_conn_id(hba
, tgt
->fcoe_conn_id
);
776 * bnx2i_free_session_resc - free qp resources for the session
778 * @hba: adapter structure pointer
779 * @tgt: bnx2fc_rport structure pointer
781 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
783 static void bnx2fc_free_session_resc(struct bnx2fc_hba
*hba
,
784 struct bnx2fc_rport
*tgt
)
786 BNX2FC_TGT_DBG(tgt
, "Freeing up session resources\n");
789 iounmap(tgt
->ctx_base
);
790 tgt
->ctx_base
= NULL
;
794 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->lcq_mem_size
,
795 tgt
->lcq
, tgt
->lcq_dma
);
800 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->conn_db_mem_size
,
801 tgt
->conn_db
, tgt
->conn_db_dma
);
804 /* Free confq and confq pbl */
805 if (tgt
->confq_pbl
) {
806 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->confq_pbl_size
,
807 tgt
->confq_pbl
, tgt
->confq_pbl_dma
);
808 tgt
->confq_pbl
= NULL
;
811 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->confq_mem_size
,
812 tgt
->confq
, tgt
->confq_dma
);
817 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->xferq_mem_size
,
818 tgt
->xferq
, tgt
->xferq_dma
);
821 /* Free RQ PBL and RQ */
823 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->rq_pbl_size
,
824 tgt
->rq_pbl
, tgt
->rq_pbl_dma
);
828 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->rq_mem_size
,
829 tgt
->rq
, tgt
->rq_dma
);
834 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->cq_mem_size
,
835 tgt
->cq
, tgt
->cq_dma
);
840 dma_free_coherent(&hba
->pcidev
->dev
, tgt
->sq_mem_size
,
841 tgt
->sq
, tgt
->sq_dma
);