/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"
static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
			  struct dst_entry *new, struct neighbour *new_neigh,
			  const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list,
 *	and call back the client for each activated offload device
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
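
/*
 * Example usage (illustrative sketch, not part of this driver): an upper
 * layer such as an iSCSI or RDMA module describes itself with a
 * struct cxgb3_client and registers it at module init.  The my_ulp_*
 * names below are hypothetical.
 *
 *	static struct cxgb3_client my_ulp_client = {
 *		.name	= "my_ulp",
 *		.add	= my_ulp_add,
 *		.remove	= my_ulp_remove,
 *	};
 *
 *	static int __init my_ulp_init(void)
 *	{
 *		cxgb3_register_client(&my_ulp_client);
 *		return 0;
 *	}
 */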
/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list,
 *	and call back the client for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);
/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is activated
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
/**
 *	cxgb3_remove_clients - deactivate registered clients
 *			       for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is deactivated
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			rcu_read_lock();
			if (vlan && vlan != VLAN_VID_MASK) {
				dev = __vlan_find_dev_deep(dev, vlan);
			} else if (netif_is_bond_slave(dev)) {
				struct net_device *upper_dev;

				while ((upper_dev =
					netdev_master_upper_dev_get_rcu(dev)))
					dev = upper_dev;
			}
			rcu_read_unlock();
			return dev;
		}
	}
	return NULL;
}
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * on rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);

		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup. This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}
/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);
/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);
/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}
/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
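
/*
 * Example (illustrative sketch): an active-open client typically reserves
 * an atid for the connection request and, once the connection is
 * established, binds its context to the hardware TID and releases the
 * atid.  The my_ulp_client/my_ctx/hwtid names are hypothetical and error
 * handling is elided.
 *
 *	int atid = cxgb3_alloc_atid(tdev, &my_ulp_client, my_ctx);
 *	if (atid < 0)
 *		return -ENOMEM;
 *	...
 *	cxgb3_insert_tid(tdev, &my_ulp_client, my_ctx, hwtid);
 *	cxgb3_free_atid(tdev, atid);
 */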
/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);
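
/*
 * Example (illustrative sketch): a client that only holds a net_device,
 * e.g. inside a netdev notifier, can recover the offload device and its
 * private data this way (netdev is hypothetical and must be a port of a
 * cxgb3 adapter):
 *
 *	struct t3cdev *tdev = dev2t3cdev(netdev);
 *	struct t3c_data *td = T3C_DATA(tdev);
 */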
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct cpl_abort_rpl),
						    GFP_ATOMIC);
		if (!reply_skb) {
			printk("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}
static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}
/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->old_neigh,
			      nr->new, nr->new_neigh,
			      nr->daddr);
		cxgb_neigh_update(nr->new_neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		pr_err("T3C: handler registration for opcode %x failed\n",
		       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
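
/*
 * Example (illustrative sketch): a module can route an opcode to its own
 * handler instead of the defaults installed by cxgb3_offload_init()
 * below.  my_rx_data() is hypothetical; returning CPL_RET_BUF_DONE tells
 * process_rx() to free the skb.
 *
 *	static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		...
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
 */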
/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}
/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);
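
/*
 * Example (illustrative sketch): building and queueing a control message
 * through the offload transmit path, as t3_process_tid_release_list()
 * does above:
 *
 *	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_tid_release),
 *					GFP_KERNEL);
 *	if (skb) {
 *		mk_tid_release(skb, tid);
 *		cxgb3_ofld_send(tdev, skb);
 *	}
 */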
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}
static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		pr_err("%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}
static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
			  struct dst_entry *new, struct neighbour *new_neigh,
			  const void *daddr)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old_neigh->dev;
	newdev = new_neigh->dev;

	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		pr_warn("%s: Redirect to non-offload device ignored\n",
			__func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		pr_warn("%s: Redirect to different offload device ignored\n",
			__func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, newdev, daddr);
	if (!e) {
		pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(tdev, e);
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}
static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
	kfree(t);
	return err;
}
static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	t3_free_l2t(d);
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	kfree_skb(t->nofail_skb);
	kfree(t);
}
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}
void cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}