/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
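
/*
 * CM endpoint timer helpers.  start_ep_timer() arms (or re-arms) the
 * per-endpoint timer and takes a reference on the endpoint so it cannot
 * be freed while the timer is pending; stop_ep_timer() drops that
 * reference when the timer is cancelled.
 */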
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}
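
/*
 * Thin wrappers around the cxgb4 LLD send routines that free the skb
 * and fail the send when the device is in a fatal error state.
 */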
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
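
/*
 * Derive the effective MSS from the negotiated TCP options; the MSS
 * index in the options selects an entry from the adapter's MTU table.
 */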
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	unsigned long flags;
	enum c4iw_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}
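
/*
 * Endpoint allocation and release.  Endpoints are reference counted
 * via epc->kref; _c4iw_free_ep() runs when the last reference is
 * dropped.
 */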
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
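
/*
 * Send the FW_FLOWC_WR work request that describes this connection to
 * the firmware (channel, queues, initial sequence numbers, send buffer
 * and MSS) before any streaming-mode data is transmitted.
 */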
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(0);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
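
/*
 * Build and send the CPL_ACT_OPEN_REQ that initiates an active TCP
 * connection, encoding the 4-tuple and the opt0/opt2 TCP options.
 */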
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
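
/*
 * Transmit the MPA start request as immediate data in a
 * FW_OFLD_TX_DATA_WR, then move the endpoint to MPA_REQ_SENT.
 */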
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	send_mpa_req(ep, skb);

	return 0;
}
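
/*
 * Upcalls into the iwarp CM (iw_cm) event handler.  These translate
 * hardware and MPA events into IW_CM_EVENT_* notifications for the ULP.
 */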
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
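
/*
 * Return credits to the hardware with a CPL_RX_DATA_ACK so the TCP
 * receive window reopens as MPA streaming data is consumed.
 */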
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits);
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
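
/*
 * Accumulate and validate the peer's MPA start reply; on success bind
 * the QP to the TID and move it to RTS, otherwise abort the connection.
 */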
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;
	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
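
/*
 * Streaming-mode receive path: data arriving before FPDU mode is MPA
 * negotiation traffic and is dispatched on the current endpoint state.
 */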
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	unsigned long flags;
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return 0;
}

static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return 0;
}

static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
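
/*
 * Handle a CPL_PASS_ACCEPT_REQ for a listening endpoint: resolve the
 * route and L2T entry, allocate and initialize a child endpoint, and
 * either accept or reject the incoming connection request.
 */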
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	struct net_device *pdev;
	u32 tx_chan, smac_idx;
	u16 rss_qid;
	u32 mtu;
	int step;
	int txq_idx;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->u.dst;
	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, peer_ip);
		BUG_ON(!pdev);
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    pdev, 0);
		mtu = pdev->mtu;
		tx_chan = cxgb4_port_chan(pdev);
		smac_idx = tx_chan << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(pdev) * step;
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    dst->neighbour->dev, 0);
		mtu = dst_mtu(dst);
		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
		smac_idx = tx_chan << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[
			  cxgb4_port_idx(dst->neighbour->dev) * step];
	}
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		cxgb4_l2t_release(l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	child_ep->tx_chan = tx_chan;
	child_ep->smac_idx = smac_idx;
	child_ep->rss_qid = rss_qid;
	child_ep->mtu = mtu;
	child_ep->txq_idx = txq_idx;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     tx_chan, smac_idx, rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}
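
/*
 * The peer sent a FIN.  What happens next depends on where the
 * endpoint is in the connection state machine; the switch below drives
 * the state transitions, any QP state change and the ULP notification.
 */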
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;
	int closing = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int start_timer = 0;
	int stop_timer = 0;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_timer = 1;
		__state_set(&ep->com, CLOSING);
		closing = 1;
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_timer = 1;
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (closing) {
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	}
	if (start_timer)
		start_ep_timer(ep);
	if (stop_timer)
		stop_ep_timer(ep);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);
	int stop_timer = 0;

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_timer = 1;
		break;
	case MPA_REQ_SENT:
		stop_timer = 1;
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_timer = 1;
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (stop_timer)
		stop_ep_timer(ep);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	unsigned long flags;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	int stop_timer = 0;

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_timer = 1;
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (stop_timer)
		stop_ep_timer(ep);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rdma_terminate *term = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(term);

	ep = lookup_tid(t, tid);

	if (state_read(&ep->com) != FPDU_MODE)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb_pull(skb, sizeof *term);
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
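
/*
 * ULP-facing entry points, called via the iw_cm provider interface.
 */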
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}
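
/*
 * Active connection setup: allocate an endpoint and an atid, resolve a
 * route and L2T entry, then fire the connect request off to the adapter.
 */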
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   cm_id->remote_addr.sin_addr.s_addr);
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					pdev, 0);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = ep->tx_chan << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					ep->dst->neighbour->dev, 0);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
		ep->smac_idx = ep->tx_chan << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
	}
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
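
/*
 * Create a hardware listening server on the given local address and
 * wait for the firmware's CPL_PASS_OPEN_RPL before returning.
 */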
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
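
/*
 * Initiate a local close or abort of the connection, depending on
 * 'abrupt' and the current endpoint state.
 */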
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;
	int start_timer = 0;
	int stop_timer = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_timer = 1;
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_timer = 1;
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (start_timer)
		start_ep_timer(ep);
	if (stop_timer)
		stop_ep_timer(ep);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack
};
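
/*
 * Endpoint timer expiry: the connection has been stuck in a transient
 * state too long, so abort it and inform the ULP.
 */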
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	spin_lock_irq(&ep->com.lock);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
		       __func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irq(&ep->com.lock);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			wr_waitp->ret = ret;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		break;
	case 2:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};
c4iw_cm_init(void)
2359 spin_lock_init(&timeout_lock
);
2360 skb_queue_head_init(&rxq
);
2362 workq
= create_singlethread_workqueue("iw_cxgb4");
2369 void __exit
c4iw_cm_term(void)
2371 WARN_ON(!list_empty(&timeout_list
));
2372 flush_workqueue(workq
);
2373 destroy_workqueue(workq
);