/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
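/*
 * All of the knobs above are standard module parameters (mode 0644), so
 * they can be set at load time or adjusted later through sysfs.  A
 * hypothetical example invocation, for illustration only (iw_cxgb4 is
 * the module this file is built into; parameter names match the
 * variables above):
 *
 *   modprobe iw_cxgb4 mpa_rev=2 peer2peer=1 rcv_win=262144
 */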
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
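/*
 * The endpoint timer guards CM operations: start_ep_timer() takes a
 * reference on the endpoint and arms ep_timeout() to fire after
 * ep_timeout_secs.  stop_ep_timer() returns 0 after dropping the
 * timer's reference, or 1 if the TIMEOUT flag was already set, meaning
 * the timeout handler fired and owns the reference instead.
 */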
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
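/*
 * Thin wrappers around the cxgb4 LLD transmit paths: both free the skb
 * and fail fast with -EIO when the device is in a fatal error state,
 * and both normalize the LLD return value so callers see 0 on success
 * or a negative errno.
 */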
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
}
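/*
 * Derive the effective MSS for the connection: start from the MTU-table
 * entry negotiated in the TCP options, subtract the IPv4/IPv6 and TCP
 * header sizes, and subtract the (padded) timestamp option if
 * timestamps were negotiated.
 */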
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
:
365 * Try and reuse skbs already allocated...
367 static struct sk_buff
*get_skb(struct sk_buff
*skb
, int len
, gfp_t gfp
)
369 if (skb
&& !skb_is_nonlinear(skb
) && !skb_cloned(skb
)) {
372 skb_reset_transport_header(skb
);
374 skb
= alloc_skb(len
, gfp
);
376 t4_set_arp_err_handler(skb
, NULL
, NULL
);
static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}
static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

enum {
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};
static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}
/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err(MOD "ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	}
}
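/*
 * Post the FW_FLOWC_WR that primes the firmware with this connection's
 * parameters (PF/VF, channel, port, ingress queue, send/receive
 * sequence numbers, send buffer and MSS).  Eight mnemonics are always
 * sent; a ninth carries the VLAN priority as a scheduling class when
 * the L2T entry has a VLAN, otherwise slot 8 is zeroed to pad the WR
 * to a 16-byte boundary.
 */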
static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
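/*
 * Build and transmit the active-open CPL for this connection.  The
 * request layout is chip-specific (T4/T5/T6, each with IPv4 and IPv6
 * variants), but every variant carries the same opt0/opt2 tuning:
 * window scale, MSS index, L2T index, TX channel and DSCP in opt0;
 * ECN, timestamps, SACK and the RSS queue in opt2.
 */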
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
			t5req->opt2 = cpu_to_be32(opt2);
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
			t5req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}
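/*
 * Stream the MPA start request as immediate data inside a
 * FW_OFLD_TX_DATA_WR.  For MPA revision 2 the private data is prefixed
 * with an mpa_v2_conn_params block advertising our IRD/ORD and the
 * peer2peer RTR opcode(s) we can accept.
 */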
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
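/*
 * A CPL_ACT_ESTABLISH means the active-open TCP connection is up: move
 * the endpoint from its atid to the hardware tid, record the initial
 * send/receive sequence numbers, then kick off MPA negotiation with a
 * FLOWC followed by the MPA start request.
 */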
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep, NULL);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
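/*
 * RELAXED_IRD_NEGOTIATION: when set, shrink our IRD/ORD to match what
 * the peer advertised instead of terminating the connection outright,
 * as long as the adapter can support the peer's values.
 */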
#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}
static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2
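/*
 * Resolve the neighbour for the destination and populate the endpoint's
 * transmit parameters from the egress port: L2T entry, MTU, TX channel,
 * SMAC index, TX/control/RSS queue indices and TCP window sizes.
 * Loopback destinations are mapped to the netdev that owns the address.
 */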
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}

	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
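/*
 * Retry an active open from scratch: allocate a fresh atid, re-resolve
 * the route and L2T entry, and resend the connect request.  On any
 * failure the upper layer gets a -ECONNRESET connect reply, since it
 * is still waiting on the original connect request and does not know
 * a reconnect was attempted.
 */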
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;
	int ret = 0;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		PDBG("%s Connection problems for atid %u status %u (%s)\n",
		     __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
						   ntohl(rpl->atid_status))));
			if (ret)
				goto fail;
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.local_addr;
				cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)
					&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

fail:
	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
			       IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
			       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}

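/*
 * Worked example (editor's addition): accept_cr() encodes the receive
 * window in 1KB units, clamped to what the opt0 field can hold.  For a
 * hypothetical rcv_win of 256KB:
 *
 *	win = 262144 >> 10;		// 256 (1KB units)
 *	if (win > RCV_BUFSIZ_M)		// clamp to the field maximum
 *		win = RCV_BUFSIZ_M;
 *
 * Any remainder beyond RCV_BUFSIZ_M is granted later via rx_data_ack,
 * as the comment in accept_cr() notes.
 */
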
static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}

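/*
 * Illustrative note (editor's addition): get_4tuple() relies on the
 * hardware placing the raw packet headers immediately after the CPL:
 *
 *	(req + 1) --> | eth hdr (eth_len) | ip/ipv6 hdr (ip_len) | tcp hdr |
 *
 * eth_len and ip_len are decoded from req->hdr_len with chip-specific
 * field macros, which is why the T5/T6 split above exists.
 */
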
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;
	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		PDBG("%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
		   local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 tos);
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = tos;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep, skb);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);

	return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}

2835 static int close_con_rpl(struct c4iw_dev
*dev
, struct sk_buff
*skb
)
2838 struct c4iw_qp_attributes attrs
;
2839 struct cpl_close_con_rpl
*rpl
= cplhdr(skb
);
2841 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
2842 unsigned int tid
= GET_TID(rpl
);
2844 ep
= lookup_tid(t
, tid
);
2846 PDBG("%s ep %p tid %u\n", __func__
, ep
, ep
->hwtid
);
2849 /* The cm_id may be null if we failed to connect */
2850 mutex_lock(&ep
->com
.mutex
);
2851 set_bit(CLOSE_CON_RPL
, &ep
->com
.history
);
2852 switch (ep
->com
.state
) {
2854 __state_set(&ep
->com
, MORIBUND
);
2857 (void)stop_ep_timer(ep
);
2858 if ((ep
->com
.cm_id
) && (ep
->com
.qp
)) {
2859 attrs
.next_state
= C4IW_QP_STATE_IDLE
;
2860 c4iw_modify_qp(ep
->com
.qp
->rhp
,
2862 C4IW_QP_ATTR_NEXT_STATE
,
2865 close_complete_upcall(ep
, 0);
2866 __state_set(&ep
->com
, DEAD
);
2876 mutex_unlock(&ep
->com
.mutex
);
2878 release_ep_resources(ep
);
2882 static int terminate(struct c4iw_dev
*dev
, struct sk_buff
*skb
)
2884 struct cpl_rdma_terminate
*rpl
= cplhdr(skb
);
2885 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
2886 unsigned int tid
= GET_TID(rpl
);
2888 struct c4iw_qp_attributes attrs
;
2890 ep
= lookup_tid(t
, tid
);
2893 if (ep
&& ep
->com
.qp
) {
2894 printk(KERN_WARNING MOD
"TERM received tid %u qpid %u\n", tid
,
2895 ep
->com
.qp
->wq
.sq
.qid
);
2896 attrs
.next_state
= C4IW_QP_STATE_TERMINATE
;
2897 c4iw_modify_qp(ep
->com
.qp
->rhp
, ep
->com
.qp
,
2898 C4IW_QP_ATTR_NEXT_STATE
, &attrs
, 1);
2900 printk(KERN_WARNING MOD
"TERM received tid %u no ep/qp\n", tid
);
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		mutex_lock(&ep->com.mutex);
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
	return 0;
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		disconnect = 2;
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err_out;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		err = -EINVAL;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				conn_param->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err_deref_cm_id;

	set_bit(STOP_MPA_TIMER, &ep->com.flags);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}

static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}

static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}

static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}

int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail1;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	laddr = (struct sockaddr_in *)&ep->com.local_addr;
	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;

	if (cm_id->m_remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, cm_id->tos);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail3;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail3:
	dst_release(ep->dst);
fail2:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.local_addr;

	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (err)
			return err;
	}
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	}
	return err;
}

static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
					err = -EIO;
					break;
				}
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}

int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->m_local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->m_local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		struct sockaddr_in6 *sin6;

		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
	return err;
}

int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret) {
			set_bit(EP_DISC_FAIL, &ep->com.history);
			if (!abrupt) {
				stop_ep_timer(ep);
				close_complete_upcall(ep, -EIO);
			}
			if (ep->com.qp) {
				struct c4iw_qp_attributes attrs;

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
						     ep->com.qp,
						     C4IW_QP_ATTR_NEXT_STATE,
						     &attrs, 1);
				if (ret)
					pr_err(MOD
					       "%s - qp <- error failed!\n",
					       __func__);
			}
			fatal = 1;
		}
	}
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

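/*
 * Usage note (editor's addition): callers pick the close flavor with
 * the "abrupt" flag; e.g. error paths in this file do
 *
 *	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);	// abort (RST)
 *
 * while a graceful teardown passes abrupt == 0 to send a half-close
 * and let the normal FIN state machine run.
 */
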
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info  = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}

static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}

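/*
 * Illustrative note (editor's addition): the original SYN skb is
 * stashed in the work request cookie and recovered when the firmware
 * replies, closing the loop started here:
 *
 *	req->cookie = (uintptr_t)skb;			// this function
 *	...
 *	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
 *						// passive_ofld_conn_reply()
 */
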
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}

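/*
 * Flow summary (editor's addition): for filter-redirected SYNs the
 * passive open happens in three steps rather than one:
 *
 *	rx_pkt()                    - validate SYN, resolve route/L2T
 *	build_cpl_pass_accept_req() - synthesize the CPL minus the TID
 *	send_fw_pass_open_req()     - ask firmware for a TID
 *
 * and deferred_fw6_msg()/passive_ofld_conn_reply() replay the stored
 * skb through pass_accept_req() once the TID arrives.
 */
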
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

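/*
 * Illustrative note (editor's addition): sched() and process_work()
 * cooperate through the skb control buffer; the device pointer is
 * stored past the first pointer-sized slot and read back verbatim:
 *
 *	*((struct c4iw_dev **)(skb->cb + sizeof(void *))) = dev;  // sched()
 *	dev = *((struct c4iw_dev **)(skb->cb + sizeof(void *)));  // process_work()
 */
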
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		dev->rdev.stats.neg_adv++;
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1
	 * then, don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

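/*
 * Usage note (editor's addition): the cxgb4 ULD layer indexes this
 * table by CPL opcode, so a message is dispatched roughly as
 *
 *	ret = c4iw_handlers[opcode](dev, skb);
 *
 * mirroring the work_handlers[] dispatch done in process_work() above.
 */
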
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}