/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
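
/*
 * Note: compute_wscale(), used by send_connect() and accept_cr() below, is
 * not defined in this file; it is assumed to come from iw_cxgb4.h. A minimal
 * sketch of the assumed helper, for reference:
 *
 *	static inline int compute_wscale(int win)
 *	{
 *		int wscale = 0;
 *
 *		while (wscale < 14 && (65535 << wscale) < win)
 *			wscale++;
 *		return wscale;
 *	}
 *
 * i.e. the smallest TCP window-scale shift (capped at 14) that lets the
 * 16-bit window field cover the requested receive window.
 */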
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when its not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	/* effective MSS: path MTU for this connection less 40 bytes of
	 * IP + TCP headers, less 12 more if TCP timestamps are in use. */
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	unsigned long flags;
	enum c4iw_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	send_mpa_req(ep, skb);

	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	unsigned long flags;
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return 0;
}

static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	struct net_device *pdev;
	u32 tx_chan, smac_idx;
	u16 rss_qid;
	u32 mtu;
	int step;
	int txq_idx, ctrlq_idx;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, peer_ip);
		BUG_ON(!pdev);
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    pdev, 0);
		mtu = pdev->mtu;
		tx_chan = cxgb4_port_chan(pdev);
		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(pdev) * step;
		ctrlq_idx = cxgb4_port_idx(pdev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    dst->neighbour->dev, 0);
		mtu = dst_mtu(dst);
		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[
			  cxgb4_port_idx(dst->neighbour->dev) * step];
	}
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		cxgb4_l2t_release(l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	child_ep->tx_chan = tx_chan;
	child_ep->smac_idx = smac_idx;
	child_ep->rss_qid = rss_qid;
	child_ep->mtu = mtu;
	child_ep->txq_idx = txq_idx;
	child_ep->ctrlq_idx = ctrlq_idx;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     tx_chan, smac_idx, rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;
	int closing = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		closing = 1;
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (closing) {
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	unsigned long flags;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rdma_terminate *term = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(term);

	ep = lookup_tid(t, tid);

	if (state_read(&ep->com) != FPDU_MODE)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb_pull(skb, sizeof *term);
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   cm_id->remote_addr.sin_addr.s_addr);
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					pdev, 0);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					ep->dst->neighbour->dev, 0);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
				0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
	}
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
	if (ep->com.rpl_done)
		err = ep->com.rpl_err;
	else {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(ep->com.dev->rdev.lldi.pdev));
		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
		err = -EIO;
	}
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
	if (ep->com.rpl_done)
		err = ep->com.rpl_err;
	else {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(ep->com.dev->rdev.lldi.pdev));
		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
		err = -EIO;
	}
done:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack
};
*ep
)
2198 struct c4iw_qp_attributes attrs
;
2201 spin_lock_irq(&ep
->com
.lock
);
2202 PDBG("%s ep %p tid %u state %d\n", __func__
, ep
, ep
->hwtid
,
2204 switch (ep
->com
.state
) {
2206 __state_set(&ep
->com
, ABORTING
);
2207 connect_reply_upcall(ep
, -ETIMEDOUT
);
2210 __state_set(&ep
->com
, ABORTING
);
2214 if (ep
->com
.cm_id
&& ep
->com
.qp
) {
2215 attrs
.next_state
= C4IW_QP_STATE_ERROR
;
2216 c4iw_modify_qp(ep
->com
.qp
->rhp
,
2217 ep
->com
.qp
, C4IW_QP_ATTR_NEXT_STATE
,
2220 __state_set(&ep
->com
, ABORTING
);
2223 printk(KERN_ERR
"%s unexpected state ep %p tid %u state %u\n",
2224 __func__
, ep
, ep
->hwtid
, ep
->com
.state
);
2228 spin_unlock_irq(&ep
->com
.lock
);
2230 abort_connection(ep
, NULL
, GFP_KERNEL
);
2231 c4iw_put_ep(&ep
->com
);
2234 static void process_timedout_eps(void)
2238 spin_lock_irq(&timeout_lock
);
2239 while (!list_empty(&timeout_list
)) {
2240 struct list_head
*tmp
;
2242 tmp
= timeout_list
.next
;
2244 spin_unlock_irq(&timeout_lock
);
2245 ep
= list_entry(tmp
, struct c4iw_ep
, entry
);
2246 process_timeout(ep
);
2247 spin_lock_irq(&timeout_lock
);
2249 spin_unlock_irq(&timeout_lock
);
2252 static void process_work(struct work_struct
*work
)
2254 struct sk_buff
*skb
= NULL
;
2255 struct c4iw_dev
*dev
;
2256 struct cpl_act_establish
*rpl
;
2257 unsigned int opcode
;
2260 while ((skb
= skb_dequeue(&rxq
))) {
2262 dev
= *((struct c4iw_dev
**) (skb
->cb
+ sizeof(void *)));
2263 opcode
= rpl
->ot
.opcode
;
2265 BUG_ON(!work_handlers
[opcode
]);
2266 ret
= work_handlers
[opcode
](dev
, skb
);
2270 process_timedout_eps();
2273 static DECLARE_WORK(skb_work
, process_work
);
2275 static void ep_timeout(unsigned long arg
)
2277 struct c4iw_ep
*ep
= (struct c4iw_ep
*)arg
;
2279 spin_lock(&timeout_lock
);
2280 list_add_tail(&ep
->entry
, &timeout_list
);
2281 spin_unlock(&timeout_lock
);
2282 queue_work(workq
, &skb_work
);
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			wr_waitp->ret = ret;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		kfree_skb(skb);
		break;
	case 2:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		kfree_skb(skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}