/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
        "idle",
        "listen",
        "connecting",
        "mpa_wait_req",
        "mpa_req_sent",
        "mpa_req_rcvd",
        "mpa_rep_sent",
        "fpdu_mode",
        "aborting",
        "closing",
        "moribund",
        "dead",
        NULL,
};
static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
                 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
                           "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
                                  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
                          "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
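
/*
 * The endpoint timer guards every CM operation that can hang waiting on
 * the peer.  Arming it takes an extra reference on the endpoint so the ep
 * cannot be freed while the timer is pending; stop_ep_timer() drops that
 * reference again.
 */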
static void start_ep_timer(struct c4iw_ep *ep)
{
        PDBG("%s ep %p\n", __func__, ep);
        if (timer_pending(&ep->timer)) {
                PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
                del_timer_sync(&ep->timer);
        } else
                c4iw_get_ep(&ep->com);
        ep->timer.expires = jiffies + ep_timeout_secs * HZ;
        ep->timer.data = (unsigned long)ep;
        ep->timer.function = ep_timeout;
        add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
        PDBG("%s ep %p\n", __func__, ep);
        if (!timer_pending(&ep->timer)) {
                printk(KERN_ERR "%s timer stopped when its not running! "
                       "ep %p state %u\n", __func__, ep, ep->com.state);
                return;
        }
        del_timer_sync(&ep->timer);
        c4iw_put_ep(&ep->com);
}

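/*
 * Thin wrappers around the cxgb4 LLD transmit paths.  Both drop the skb
 * instead of sending it once the device has hit a fatal error, so the CM
 * state machinery keeps running (and eventually times out) even after the
 * adapter is dead.
 */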
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
                         struct l2t_entry *l2e)
{
        int error = 0;

        if (c4iw_fatal_error(rdev)) {
                kfree_skb(skb);
                PDBG("%s - device in error state - dropping\n", __func__);
                return -EIO;
        }
        error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
        if (error < 0)
                kfree_skb(skb);
        return error;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
        int error = 0;

        if (c4iw_fatal_error(rdev)) {
                kfree_skb(skb);
                PDBG("%s - device in error state - dropping\n", __func__);
                return -EIO;
        }
        error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
        if (error < 0)
                kfree_skb(skb);
        return error;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
        struct cpl_tid_release *req;

        skb = get_skb(skb, sizeof *req, GFP_KERNEL);
        if (!skb)
                return;
        req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
        c4iw_ofld_send(rdev, skb);
        return;
}

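/*
 * Derive the effective MSS from the negotiated TCP options: start from the
 * hardware MTU table entry minus 40 bytes of IP + TCP headers, and subtract
 * another 12 bytes when TCP timestamps were negotiated.
 */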
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
        ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
        ep->mss = ep->emss;
        if (GET_TCPOPT_TSTAMP(opt))
                ep->emss -= 12;
        if (ep->emss < 128)
                ep->emss = 128;
        PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
             ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
        enum c4iw_ep_state state;
        unsigned long flags;

        spin_lock_irqsave(&epc->lock, flags);
        state = epc->state;
        spin_unlock_irqrestore(&epc->lock, flags);
        return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
        epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
        unsigned long flags;

        spin_lock_irqsave(&epc->lock, flags);
        PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
        __state_set(epc, new);
        spin_unlock_irqrestore(&epc->lock, flags);
        return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
        struct c4iw_ep_common *epc;

        epc = kzalloc(size, gfp);
        if (epc) {
                kref_init(&epc->kref);
                spin_lock_init(&epc->lock);
                init_waitqueue_head(&epc->waitq);
        }
        PDBG("%s alloc ep %p\n", __func__, epc);
        return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
        struct c4iw_ep *ep;

        ep = container_of(kref, struct c4iw_ep, com.kref);
        PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
        if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
                dst_release(ep->dst);
                cxgb4_l2t_release(ep->l2t);
        }
        kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
        set_bit(RELEASE_RESOURCES, &ep->com.flags);
        c4iw_put_ep(&ep->com);
}

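/*
 * Worker that drains the global rxq.  CPL messages are queued here by
 * sched() from the LLD upcall context and dispatched to the
 * work_handlers[] entry matching their opcode.
 */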
static void process_work(struct work_struct *work)
{
        struct sk_buff *skb = NULL;
        struct c4iw_dev *dev;
        struct cpl_act_establish *rpl;
        unsigned int opcode;
        int ret;

        while ((skb = skb_dequeue(&rxq))) {
                rpl = cplhdr(skb);
                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
                opcode = rpl->ot.opcode;

                BUG_ON(!work_handlers[opcode]);
                ret = work_handlers[opcode](dev, skb);
                if (!ret)
                        kfree_skb(skb);
        }
}

static int status2errno(int status)
{
        switch (status) {
        case CPL_ERR_NONE:
                return 0;
        case CPL_ERR_CONN_RESET:
                return -ECONNRESET;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
        if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
                skb_trim(skb, 0);
                skb_get(skb);
                skb_reset_transport_header(skb);
        } else {
                skb = alloc_skb(len, gfp);
        }
        return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
                                 __be32 peer_ip, __be16 local_port,
                                 __be16 peer_port, u8 tos)
{
        struct rtable *rt;
        struct flowi fl = {
                .oif = 0,
                .nl_u = {
                         .ip4_u = {
                                   .daddr = peer_ip,
                                   .saddr = local_ip,
                                   .tos = tos}
                        },
                .proto = IPPROTO_TCP,
                .uli_u = {
                          .ports = {
                                    .sport = local_port,
                                    .dport = peer_port}
                         }
        };

        if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
                return NULL;
        return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
        PDBG("%s c4iw_dev %p\n", __func__, handle);
        kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
        printk(KERN_ERR MOD "ARP failure during connect\n");
        kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct c4iw_rdev *rdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        PDBG("%s rdev %p\n", __func__, rdev);
        req->cmd = CPL_ABORT_NO_RST;
        c4iw_ofld_send(rdev, skb);
}

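/*
 * Send the FLOWC work request before the first MPA message on every new
 * connection (see act_establish() and pass_establish()): it tells the
 * firmware the tx channel, port, ingress queue, initial send/receive
 * sequence numbers, send buffer size and MSS to use for the flow.  A ninth,
 * zeroed mnemonic pads the WR to a 16-byte boundary.
 */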
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
        unsigned int flowclen = 80;
        struct fw_flowc_wr *flowc;
        int i;

        skb = get_skb(skb, flowclen, GFP_KERNEL);
        flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
                                          16)) | FW_WR_FLOWID(ep->hwtid));

        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(0);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(ep->emss);
        /* Pad WR to 16 byte boundary */
        flowc->mnemval[8].mnemonic = 0;
        flowc->mnemval[8].val = 0;
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
                flowc->mnemval[i].r4[2] = 0;
        }

        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
        struct cpl_close_con_req *req;
        struct sk_buff *skb;
        int wrlen = roundup(sizeof *req, 16);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        skb = get_skb(NULL, wrlen, gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
                                                    ep->hwtid));
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        struct cpl_abort_req *req;
        int wrlen = roundup(sizeof *req, 16);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        skb = get_skb(skb, wrlen, gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
        req = (struct cpl_abort_req *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
        req->cmd = CPL_ABORT_SEND_RST;
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

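/*
 * Build and send the CPL_ACT_OPEN_REQ for an active open.  opt0/opt2 encode
 * the TCP personality of the connection (window scale, MSS index, L2T
 * index, tx channel, receive buffer size, timestamps, SACK and the RSS
 * queue), subject to the module parameters above.
 */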
static int send_connect(struct c4iw_ep *ep)
{
        struct cpl_act_open_req *req;
        struct sk_buff *skb;
        u64 opt0;
        u32 opt2;
        unsigned int mtu_idx;
        int wscale;
        int wrlen = roundup(sizeof *req, 16);

        PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);

        cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
        wscale = compute_wscale(rcv_win);
        opt0 = KEEP_ALIVE(1) |
               WND_SCALE(wscale) |
               MSS_IDX(mtu_idx) |
               L2T_IDX(ep->l2t->idx) |
               TX_CHAN(ep->tx_chan) |
               SMAC_SEL(ep->smac_idx) |
               DSCP(ep->tos) |
               RCV_BUFSIZ(rcv_win>>10);
        opt2 = RX_CHANNEL(0) |
               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
        if (enable_tcp_timestamps)
                opt2 |= TSTAMPS_EN(1);
        if (enable_tcp_sack)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

        req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(
                MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
        req->local_port = ep->com.local_addr.sin_port;
        req->peer_port = ep->com.remote_addr.sin_port;
        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
        req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
        req->opt0 = cpu_to_be64(opt0);
        req->params = 0;
        req->opt2 = cpu_to_be32(opt2);
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

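/*
 * Stream the MPA start request as immediate data in a FW_OFLD_TX_DATA work
 * request.  Any private data accumulated in ep->mpa_pkt rides in the same
 * WR behind the MPA header.
 */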
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        BUG_ON(skb_cloned(skb));

        mpalen = sizeof(*mpa) + ep->plen;
        wrlen = roundup(mpalen + sizeof *req, 16);
        skb = get_skb(skb, wrlen, GFP_KERNEL);
        if (!skb) {
                connect_reply_upcall(ep, -ENOMEM);
                return;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
        mpa->flags = (crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0);
        mpa->private_data_size = htons(ep->plen);
        mpa->revision = mpa_rev;

        if (ep->plen)
                memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

        /*
         * Reference the mpa skb.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        BUG_ON(ep->mpa_skb);
        ep->mpa_skb = skb;
        c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
        start_ep_timer(ep);
        state_set(&ep->com, MPA_REQ_SENT);
        ep->mpa_attr.initiator = 1;
        return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;
        struct sk_buff *skb;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        mpalen = sizeof(*mpa) + plen;
        wrlen = roundup(mpalen + sizeof *req, 16);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = MPA_REJECT;
        mpa->revision = mpa_rev;
        mpa->private_data_size = htons(plen);
        if (plen)
                memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb again.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        BUG_ON(ep->mpa_skb);
        ep->mpa_skb = skb;
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;
        struct sk_buff *skb;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        mpalen = sizeof(*mpa) + plen;
        wrlen = roundup(mpalen + sizeof *req, 16);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0);
        mpa->revision = mpa_rev;
        mpa->private_data_size = htons(plen);
        if (plen)
                memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        ep->mpa_skb = skb;
        state_set(&ep->com, MPA_REP_SENT);
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_atid(t, atid);

        PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
             be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

        dst_confirm(ep->dst);

        /* setup the hwtid for this connection */
        ep->hwtid = tid;
        cxgb4_insert_tid(t, ep, tid);

        ep->snd_seq = be32_to_cpu(req->snd_isn);
        ep->rcv_seq = be32_to_cpu(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        /* dealloc the atid */
        cxgb4_free_atid(t, atid);

        /* start MPA negotiation */
        send_flowc(ep, NULL);
        send_mpa_req(ep, skb);

        return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        if (ep->com.cm_id) {
                PDBG("close complete delivered ep %p cm_id %p tid %u\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        close_complete_upcall(ep);
        state_set(&ep->com, ABORTING);
        return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_DISCONNECT;
        if (ep->com.cm_id) {
                PDBG("peer close delivered ep %p cm_id %p tid %u\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        event.status = -ECONNRESET;
        if (ep->com.cm_id) {
                PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
                     ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REPLY;
        event.status = status;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;

        if ((status == 0) || (status == -ECONNREFUSED)) {
                event.private_data_len = ep->plen;
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        }
        if (ep->com.cm_id) {
                PDBG("%s ep %p tid %u status %d\n", __func__, ep,
                     ep->hwtid, status);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
        if (status < 0) {
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;
        event.private_data_len = ep->plen;
        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        event.provider_data = ep;
        if (state_read(&ep->parent_ep->com) != DEAD) {
                c4iw_get_ep(&ep->com);
                ep->parent_ep->com.cm_id->event_handler(
                                                ep->parent_ep->com.cm_id,
                                                &event);
        }
        c4iw_put_ep(&ep->parent_ep->com);
        ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_ESTABLISHED;
        if (ep->com.cm_id) {
                PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
}

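/*
 * Return credits to the hardware for the bytes just consumed from the
 * receive window by sending a CPL_RX_DATA_ACK.
 */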
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
        struct cpl_rx_data_ack *req;
        struct sk_buff *skb;
        int wrlen = roundup(sizeof *req, 16);

        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
                return 0;
        }

        req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    ep->hwtid));
        req->credit_dack = cpu_to_be32(credits);
        set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
        return credits;
}

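/*
 * Accumulate and validate the peer's MPA start reply: check revision, key
 * and private data length, then bind the QP to the TID and move it to RTS.
 * Any validation failure aborts the connection, and the result is reported
 * through connect_reply_upcall().
 */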
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        u16 plen;
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        int err;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        /*
         * Stop mpa timer.  If it expired, then the state has
         * changed and we bail since ep_timeout already aborted
         * the connection.
         */
        stop_ep_timer(ep);
        if (state_read(&ep->com) != MPA_REQ_SENT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                err = -EINVAL;
                goto err;
        }

        /*
         * copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * if we don't even have the mpa message, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;
        mpa = (struct mpa_message *) ep->mpa_pkt;

        /* Validate MPA header. */
        if (mpa->revision != mpa_rev) {
                err = -EPROTO;
                goto err;
        }
        if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
                err = -EPROTO;
                goto err;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                err = -EPROTO;
                goto err;
        }

        /*
         * If plen does not account for pkt size
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                err = -EPROTO;
                goto err;
        }

        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
                goto err;
        }

        /*
         * If we get here we have accumulated the entire mpa
         * start reply message including private data. And
         * the MPA header is valid.
         */
        state_set(&ep->com, FPDU_MODE);
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa_rev;
        ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
                                            FW_RI_INIT_P2PTYPE_DISABLED;
        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d\n", __func__,
             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = C4IW_QP_STATE_RTS;

        mask = C4IW_QP_ATTR_NEXT_STATE |
               C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
               C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

        /* bind QP and TID with INIT_WR */
        err = c4iw_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err;
        goto out;
err:
        abort_connection(ep, skb, GFP_KERNEL);
out:
        connect_reply_upcall(ep, err);
        return;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        u16 plen;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) != MPA_REQ_WAIT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

        /*
         * Copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * If we don't even have the mpa message, then bail.
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;

        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

        mpa = (struct mpa_message *) ep->mpa_pkt;

        /*
         * Validate MPA Header.
         */
        if (mpa->revision != mpa_rev) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        /*
         * If plen does not account for pkt size
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }
        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        /*
         * If we get here we have accumulated the entire mpa
         * start reply message including private data.
         */
        ep->mpa_attr.initiator = 0;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa_rev;
        ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
                                            FW_RI_INIT_P2PTYPE_DISABLED;
        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
             ep->mpa_attr.p2p_type);

        state_set(&ep->com, MPA_REQ_RCVD);

        /* drive upcall */
        connect_request_upcall(ep);
        return;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_rx_data *hdr = cplhdr(skb);
        unsigned int dlen = ntohs(hdr->len);
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
        skb_pull(skb, sizeof(*hdr));
        skb_trim(skb, dlen);

        ep->rcv_seq += dlen;
        BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

        /* update RX credits */
        update_rx_credits(ep, dlen);

        switch (state_read(&ep->com)) {
        case MPA_REQ_SENT:
                process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                process_mpa_request(ep, skb);
                break;
        case MPA_REP_SENT:
                break;
        default:
                printk(KERN_ERR MOD "%s Unexpected streaming data."
                       " ep %p state %d tid %u\n",
                       __func__, ep, state_read(&ep->com), ep->hwtid);

                /*
                 * The ep will timeout and inform the ULP of the failure.
                 * See ep_timeout().
                 */
                break;
        }
        return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
        unsigned long flags;
        int release = 0;
        unsigned int tid = GET_TID(rpl);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(!ep);
        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case ABORTING:
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        default:
                printk(KERN_ERR "%s ep %p state %d\n",
                       __func__, ep, ep->com.state);
                break;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);

        if (release)
                release_ep_resources(ep);
        return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
        return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
               status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
        unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
                                        ntohl(rpl->atid_status)));
        struct tid_info *t = dev->rdev.lldi.tids;
        int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

        ep = lookup_atid(t, atid);

        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));

        if (status == CPL_ERR_RTX_NEG_ADVICE) {
                printk(KERN_WARNING MOD "Connection problems for atid %u\n",
                       atid);
                return 0;
        }

        connect_reply_upcall(ep, status2errno(status));
        state_set(&ep->com, DEAD);

        if (status && act_open_has_tid(status))
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

        cxgb4_free_atid(t, atid);
        dst_release(ep->dst);
        cxgb4_l2t_release(ep->l2t);
        c4iw_put_ep(&ep->com);

        return 0;
}

*dev
, struct sk_buff
*skb
)
1214 struct cpl_pass_open_rpl
*rpl
= cplhdr(skb
);
1215 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
1216 unsigned int stid
= GET_TID(rpl
);
1217 struct c4iw_listen_ep
*ep
= lookup_stid(t
, stid
);
1220 printk(KERN_ERR MOD
"stid %d lookup failure!\n", stid
);
1223 PDBG("%s ep %p status %d error %d\n", __func__
, ep
,
1224 rpl
->status
, status2errno(rpl
->status
));
1225 ep
->com
.rpl_err
= status2errno(rpl
->status
);
1226 ep
->com
.rpl_done
= 1;
1227 wake_up(&ep
->com
.waitq
);
static int listen_stop(struct c4iw_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_close_listsvr_req *req;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
                                                    ep->stid));
        req->reply_ctrl = cpu_to_be16(
                          QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
        return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct c4iw_listen_ep *ep = lookup_stid(t, stid);

        PDBG("%s ep %p\n", __func__, ep);
        ep->com.rpl_err = status2errno(rpl->status);
        ep->com.rpl_done = 1;
        wake_up(&ep->com.waitq);
        return 0;
}

static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
                      struct cpl_pass_accept_req *req)
{
        struct cpl_pass_accept_rpl *rpl;
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2;
        int wscale;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(*rpl));
        skb_get(skb);
        cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
        wscale = compute_wscale(rcv_win);
        opt0 = KEEP_ALIVE(1) |
               WND_SCALE(wscale) |
               MSS_IDX(mtu_idx) |
               L2T_IDX(ep->l2t->idx) |
               TX_CHAN(ep->tx_chan) |
               SMAC_SEL(ep->smac_idx) |
               DSCP(ep->tos) |
               RCV_BUFSIZ(rcv_win>>10);
        opt2 = RX_CHANNEL(0) |
               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

        if (enable_tcp_timestamps && req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN(1);
        if (enable_tcp_sack && req->tcpopt.sack)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);

        rpl = cplhdr(skb);
        INIT_TP_WR(rpl, ep->hwtid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                    ep->hwtid));
        rpl->opt0 = cpu_to_be64(opt0);
        rpl->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
        c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

        return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
                      struct sk_buff *skb)
{
        PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
             peer_ip);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
        skb_get(skb);
        release_tid(&dev->rdev, hwtid, skb);
        return;
}

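/*
 * Pull the TCP/IP 4-tuple out of the packed ethernet/IP/TCP headers that
 * follow the CPL_PASS_ACCEPT_REQ message.
 */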
static void get_4tuple(struct cpl_pass_accept_req *req,
                       __be32 *local_ip, __be32 *peer_ip,
                       __be16 *local_port, __be16 *peer_port)
{
        int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
        int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
        struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
        struct tcphdr *tcp = (struct tcphdr *)
                             ((u8 *)(req + 1) + eth_len + ip_len);

        PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
             ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
             ntohs(tcp->dest));

        *peer_ip = ip->saddr;
        *local_ip = ip->daddr;
        *peer_port = tcp->source;
        *local_port = tcp->dest;

        return;
}

static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *child_ep, *parent_ep;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
        __be32 local_ip, peer_ip;
        __be16 local_port, peer_port;
        struct net_device *pdev;
        u32 tx_chan, smac_idx;
        u16 rss_qid;
        u32 mtu;
        int step;
        int txq_idx;

        parent_ep = lookup_stid(t, stid);
        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

        get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
                       __func__);
                goto reject;
        }

        /* Find output route */
        rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
                        GET_POPEN_TOS(ntohl(req->tos_stid)));
        if (!rt) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
        dst = &rt->u.dst;
        if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, peer_ip);
                BUG_ON(!pdev);
                l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
                                    pdev, 0);
                mtu = pdev->mtu;
                tx_chan = cxgb4_port_chan(pdev);
                smac_idx = tx_chan << 1;
                step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
                txq_idx = cxgb4_port_idx(pdev) * step;
                step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
                rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
                dev_put(pdev);
        } else {
                l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
                                    dst->neighbour->dev, 0);
                mtu = dst_mtu(dst);
                tx_chan = cxgb4_port_chan(dst->neighbour->dev);
                smac_idx = tx_chan << 1;
                step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
                txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
                step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
                rss_qid = dev->rdev.lldi.rxq_ids[
                          cxgb4_port_idx(dst->neighbour->dev) * step];
        }
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }

        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                cxgb4_l2t_release(l2t);
                dst_release(dst);
                goto reject;
        }
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
        child_ep->com.local_addr.sin_family = PF_INET;
        child_ep->com.local_addr.sin_port = local_port;
        child_ep->com.local_addr.sin_addr.s_addr = local_ip;
        child_ep->com.remote_addr.sin_family = PF_INET;
        child_ep->com.remote_addr.sin_port = peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
        child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
        child_ep->tx_chan = tx_chan;
        child_ep->smac_idx = smac_idx;
        child_ep->rss_qid = rss_qid;
        child_ep->mtu = mtu;
        child_ep->txq_idx = txq_idx;

        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
             tx_chan, smac_idx, rss_qid);

        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
        accept_cr(child_ep, peer_ip, skb, req);
        goto out;
reject:
        reject_cr(dev, hwtid, peer_ip, skb);
out:
        return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        ep->snd_seq = be32_to_cpu(req->snd_isn);
        ep->rcv_seq = be32_to_cpu(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        dst_confirm(ep->dst);
        state_set(&ep->com, MPA_REQ_WAIT);
        start_ep_timer(ep);
        send_flowc(ep, skb);

        return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_peer_close *hdr = cplhdr(skb);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        unsigned long flags;
        int disconnect = 1;
        int release = 0;
        int closing = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(hdr);
        int start_timer = 0;
        int stop_timer = 0;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        dst_confirm(ep->dst);

        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REQ_SENT:
                __state_set(&ep->com, CLOSING);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR. Also wake up anyone waiting
                 * in rdma connection migration (see c4iw_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                wake_up(&ep->com.waitq);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                wake_up(&ep->com.waitq);
                break;
        case FPDU_MODE:
                __state_set(&ep->com, CLOSING);
                closing = 1;
                peer_close_upcall(ep);
                break;
        case ABORTING:
                disconnect = 0;
                break;
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                disconnect = 0;
                break;
        case MORIBUND:
                stop_timer = 1;
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                disconnect = 0;
                break;
        case DEAD:
                disconnect = 0;
                break;
        default:
                BUG_ON(1);
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (closing) {
                attrs.next_state = C4IW_QP_STATE_CLOSING;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
        }
        if (start_timer)
                start_ep_timer(ep);
        if (stop_timer)
                stop_ep_timer(ep);
        if (disconnect)
                c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        if (release)
                release_ep_resources(ep);
        return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
               status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct c4iw_qp_attributes attrs;
        int ret;
        int release = 0;
        unsigned long flags;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);
        int stop_timer = 0;

        ep = lookup_tid(t, tid);
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                return 0;
        }
        spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
                stop_timer = 1;
                break;
        case MPA_REQ_SENT:
                stop_timer = 1;
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REP_SENT:
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR. Also wake up anyone waiting
                 * in rdma connection migration (see c4iw_accept_cr()).
                 */
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                wake_up(&ep->com.waitq);
                break;
        case MORIBUND:
        case CLOSING:
                stop_timer = 1;
                /*FALLTHROUGH*/
        case FPDU_MODE:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        ret = c4iw_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s - qp <- error failed!\n",
                                       __func__);
                }
                peer_abort_upcall(ep);
                break;
        case ABORTING:
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
                spin_unlock_irqrestore(&ep->com.lock, flags);
                return 0;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);
        if (ep->com.state != ABORTING) {
                __state_set(&ep->com, DEAD);
                release = 1;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);

        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __func__);
                release = 1;
                goto out;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
        INIT_TP_WR(rpl, ep->hwtid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
        if (stop_timer)
                stop_ep_timer(ep);
        if (release)
                release_ep_resources(ep);
        return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);
        unsigned long flags;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);

        ep = lookup_tid(t, tid);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(!ep);

        /* The cm_id may be null if we failed to connect */
        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if ((ep->com.cm_id) && (ep->com.qp)) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        case ABORTING:
        case DEAD:
                break;
        default:
                BUG_ON(1);
                break;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (release)
                release_ep_resources(ep);
        return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_rdma_terminate *term = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(term);

        ep = lookup_tid(t, tid);

        if (state_read(&ep->com) != FPDU_MODE)
                return 0;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        skb_pull(skb, sizeof *term);
        PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
        skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
                                  skb->len);
        ep->com.qp->attr.terminate_msg_len = skb->len;
        ep->com.qp->attr.is_terminate_local = 0;
        return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_fw4_ack *hdr = cplhdr(skb);
        u8 credits = hdr->credits;
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        if (credits == 0) {
                PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
                     __func__, ep, ep->hwtid, state_read(&ep->com));
                return 0;
        }

        dst_confirm(ep->dst);
        if (ep->mpa_skb) {
                PDBG("%s last streaming msg ack ep %p tid %u state %u "
                     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
                     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
        }
        return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        struct c4iw_wr_wait *wr_waitp;
        int ret;

        PDBG("%s type %u\n", __func__, rpl->type);

        switch (rpl->type) {
        case 1:
                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
                wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
                if (wr_waitp) {
                        wr_waitp->ret = ret;
                        wr_waitp->done = 1;
                        wake_up(&wr_waitp->wait);
                }
                break;
        case 2:
                c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
                break;
        default:
                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
                       rpl->type);
                break;
        }
        return 0;
}

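/*
 * Timer fired on a CM operation: abort the connection and, for an active
 * open still waiting on the MPA reply, fail the connect with -ETIMEDOUT.
 */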
static void ep_timeout(unsigned long arg)
{
        struct c4iw_ep *ep = (struct c4iw_ep *)arg;
        struct c4iw_qp_attributes attrs;
        unsigned long flags;
        int abort = 1;

        spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
                __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                __state_set(&ep->com, ABORTING);
                break;
        default:
                printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
                       __func__, ep, ep->hwtid, ep->com.state);
                WARN_ON(1);
                abort = 0;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (abort)
                abort_connection(ep, NULL, GFP_ATOMIC);
        c4iw_put_ep(&ep->com);
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        int err;
        struct c4iw_ep *ep = to_ep(cm_id);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) == DEAD) {
                c4iw_put_ep(&ep->com);
                return -ECONNRESET;
        }
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        if (mpa_rev == 0)
                abort_connection(ep, NULL, GFP_KERNEL);
        else {
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        }
        c4iw_put_ep(&ep->com);
        return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err;
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        struct c4iw_ep *ep = to_ep(cm_id);
        struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
        struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        if (state_read(&ep->com) == DEAD) {
                err = -ECONNRESET;
                goto err;
        }

        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);

        if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
            (conn_param->ird > T4_MAX_READ_DEPTH)) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;
        }

        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;

        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (peer2peer && ep->ird == 0)
                ep->ird = 1;

        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = C4IW_QP_STATE_RTS;

        /* bind QP and TID with INIT_WR */
        mask = C4IW_QP_ATTR_NEXT_STATE |
               C4IW_QP_ATTR_LLP_STREAM_HANDLE |
               C4IW_QP_ATTR_MPA_ATTR |
               C4IW_QP_ATTR_MAX_IRD |
               C4IW_QP_ATTR_MAX_ORD;

        err = c4iw_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err1;
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
                goto err1;

        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        c4iw_put_ep(&ep->com);
        return 0;
err1:
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        cm_id->rem_ref(cm_id);
err:
        c4iw_put_ep(&ep->com);
        return err;
}

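/*
 * Active-open entry point from the iWARP CM: allocate an endpoint and an
 * atid, resolve the route and L2T entry, then issue the TCP connect via
 * send_connect().  MPA negotiation starts once act_establish() sees the
 * connection come up.
 */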
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
        struct net_device *pdev;
        int step;

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto out;
        }
        init_timer(&ep->timer);
        ep->plen = conn_param->private_data_len;
        if (ep->plen)
                memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
                       conn_param->private_data, ep->plen);
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (peer2peer && ep->ord == 0)
                ep->ord = 1;

        cm_id->add_ref(cm_id);
        ep->com.dev = dev;
        ep->com.cm_id = cm_id;
        ep->com.qp = get_qhp(dev, conn_param->qpn);
        BUG_ON(!ep->com.qp);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
             ep->com.qp, cm_id);

        /*
         * Allocate an active TID to initiate a TCP connection.
         */
        ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
             ntohl(cm_id->local_addr.sin_addr.s_addr),
             ntohs(cm_id->local_addr.sin_port),
             ntohl(cm_id->remote_addr.sin_addr.s_addr),
             ntohs(cm_id->remote_addr.sin_port));

        /* find a route */
        rt = find_route(dev,
                        cm_id->local_addr.sin_addr.s_addr,
                        cm_id->remote_addr.sin_addr.s_addr,
                        cm_id->local_addr.sin_port,
                        cm_id->remote_addr.sin_port, 0);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
        ep->dst = &rt->u.dst;

        /* get a l2t entry */
        if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
                PDBG("%s LOOPBACK\n", __func__);
                pdev = ip_dev_find(&init_net,
                                   cm_id->remote_addr.sin_addr.s_addr);
                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
                                        ep->dst->neighbour,
                                        pdev, 0);
                ep->mtu = pdev->mtu;
                ep->tx_chan = cxgb4_port_chan(pdev);
                ep->smac_idx = ep->tx_chan << 1;
                step = ep->com.dev->rdev.lldi.ntxq /
                       ep->com.dev->rdev.lldi.nchan;
                ep->txq_idx = cxgb4_port_idx(pdev) * step;
                step = ep->com.dev->rdev.lldi.nrxq /
                       ep->com.dev->rdev.lldi.nchan;
                ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
                              cxgb4_port_idx(pdev) * step];
                dev_put(pdev);
        } else {
                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
                                        ep->dst->neighbour,
                                        ep->dst->neighbour->dev, 0);
                ep->mtu = dst_mtu(ep->dst);
                ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
                ep->smac_idx = ep->tx_chan << 1;
                step = ep->com.dev->rdev.lldi.ntxq /
                       ep->com.dev->rdev.lldi.nchan;
                ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
                step = ep->com.dev->rdev.lldi.nrxq /
                       ep->com.dev->rdev.lldi.nchan;
                ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
                              cxgb4_port_idx(ep->dst->neighbour->dev) * step];
        }
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
                goto fail4;
        }

        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
             ep->l2t->idx);

        state_set(&ep->com, CONNECTING);
        ep->tos = 0;
        ep->com.local_addr = cm_id->local_addr;
        ep->com.remote_addr = cm_id->remote_addr;

        /* send connect request to rnic */
        err = send_connect(ep);
        if (!err)
                goto out;

        cxgb4_l2t_release(ep->l2t);
fail4:
        dst_release(ep->dst);
fail3:
        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
out:
        return err;
}

int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
        int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_listen_ep *ep;

        might_sleep();

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto fail1;
        }
        PDBG("%s ep %p\n", __func__, ep);
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.dev = dev;
        ep->backlog = backlog;
        ep->com.local_addr = cm_id->local_addr;

        /*
         * Allocate a server TID.
         */
        ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        state_set(&ep->com, LISTEN);
        err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
                                  ep->com.local_addr.sin_addr.s_addr,
                                  ep->com.local_addr.sin_port,
                                  ep->com.dev->rdev.lldi.rxq_ids[0]);
        if (err)
                goto fail3;

        /* wait for pass_open_rpl */
        wait_event(ep->com.waitq, ep->com.rpl_done);
        err = ep->com.rpl_err;
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
        }
fail3:
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
fail1:
out:
        return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
        int err;
        struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

        PDBG("%s ep %p\n", __func__, ep);

        might_sleep();
        state_set(&ep->com, DEAD);
        ep->com.rpl_done = 0;
        ep->com.rpl_err = 0;
        err = listen_stop(ep);
        if (err)
                goto done;
        wait_event(ep->com.waitq, ep->com.rpl_done);
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
        err = ep->com.rpl_err;
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
        return err;
}

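/*
 * Drive an orderly (half-close) or abortive shutdown of an endpoint,
 * depending on 'abrupt' and the current CM state.  On a fatal device error
 * the close is completed locally and resources are released immediately.
 */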
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
        int ret = 0;
        unsigned long flags;
        int close = 0;
        int fatal = 0;
        struct c4iw_rdev *rdev;
        int start_timer = 0;
        int stop_timer = 0;

        spin_lock_irqsave(&ep->com.lock, flags);

        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);

        rdev = &ep->com.dev->rdev;
        if (c4iw_fatal_error(rdev)) {
                fatal = 1;
                close_complete_upcall(ep);
                ep->com.state = DEAD;
        }
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
        case MPA_REQ_SENT:
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
                close = 1;
                if (abrupt)
                        ep->com.state = ABORTING;
                else {
                        ep->com.state = CLOSING;
                        start_timer = 1;
                }
                set_bit(CLOSE_SENT, &ep->com.flags);
                break;
        case CLOSING:
                if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
                        close = 1;
                        if (abrupt) {
                                stop_timer = 1;
                                ep->com.state = ABORTING;
                        } else
                                ep->com.state = MORIBUND;
                }
                break;
        case MORIBUND:
        case ABORTING:
        case DEAD:
                PDBG("%s ignoring disconnect ep %p state %u\n",
                     __func__, ep, ep->com.state);
                break;
        default:
                BUG();
                break;
        }

        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (start_timer)
                start_ep_timer(ep);
        if (stop_timer)
                stop_ep_timer(ep);
        if (close) {
                if (abrupt)
                        ret = abort_connection(ep, NULL, gfp);
                else
                        ret = send_halfclose(ep, gfp);
                if (ret)
                        fatal = 1;
        }
        if (fatal)
                release_ep_resources(ep);
        return ret;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
        /*
         * Save dev in the skb->cb area.
         */
        *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

        /*
         * Queue the skb and schedule the worker thread.
         */
        skb_queue_tail(&rxq, skb);
        queue_work(workq, &skb_work);
        return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE) {
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
        return 0;
}

int __init c4iw_cm_init(void)
{
        skb_queue_head_init(&rxq);

        workq = create_singlethread_workqueue("iw_cxgb4");
        if (!workq)
                return -ENOMEM;

        /*
         * Most upcalls from the T4 Core go to sched() to
         * schedule the processing on a work queue.
         */
        c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
        c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
        c4iw_handlers[CPL_RX_DATA] = sched;
        c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
        c4iw_handlers[CPL_ABORT_RPL] = sched;
        c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
        c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
        c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
        c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
        c4iw_handlers[CPL_PEER_CLOSE] = sched;
        c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
        c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
        c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
        c4iw_handlers[CPL_FW4_ACK] = sched;
        c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
        c4iw_handlers[CPL_FW6_MSG] = fw6_msg;

        /*
         * These are the real handlers that are called from a
         * work queue.
         */
        work_handlers[CPL_ACT_ESTABLISH] = act_establish;
        work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
        work_handlers[CPL_RX_DATA] = rx_data;
        work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
        work_handlers[CPL_ABORT_RPL] = abort_rpl;
        work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
        work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
        work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
        work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
        work_handlers[CPL_PEER_CLOSE] = peer_close;
        work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
        work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
        work_handlers[CPL_RDMA_TERMINATE] = terminate;
        work_handlers[CPL_FW4_ACK] = fw4_ack;
        return 0;
}

void __exit c4iw_cm_term(void)
{
        flush_workqueue(workq);
        destroy_workqueue(workq);
}