/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
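/*
 * CMA_CM_RESPONSE_TIMEOUT uses the IB CM encoding of 4.096 usec * 2^t,
 * so 20 corresponds to roughly 4.3 seconds.  cma_resolve_ib_udp() below
 * derives its millisecond SIDR timeout from the same constant as
 * 1 << (CMA_CM_RESPONSE_TIMEOUT - 8) == 4096 ms.
 */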
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
};

enum cma_state {
        CMA_IDLE,
        CMA_ADDR_QUERY,
        CMA_ADDR_RESOLVED,
        CMA_ROUTE_QUERY,
        CMA_ROUTE_RESOLVED,
        CMA_CONNECT,
        CMA_DISCONNECT,
        CMA_ADDR_BOUND,
        CMA_LISTEN,
        CMA_DEVICE_REMOVAL,
        CMA_DESTROYING
};

struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
        unsigned short          port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list; /* listen_any_list or cma_device.list */
        struct list_head        listen_list; /* per device listens */
        struct cma_device       *cma_dev;
        struct list_head        mc_list;

        int                     internal_id;
        enum cma_state          state;
        spinlock_t              lock;
        struct mutex            qp_mutex;

        struct completion       comp;
        atomic_t                refcount;
        struct mutex            handler_mutex;

        int                     backlog;
        int                     timeout_ms;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32                     seq_num;
        u32                     qkey;
        u32                     qp_num;
        u8                      srq;
        u8                      tos;
};
struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head        list;
        void                    *context;
        struct sockaddr_storage addr;
        struct kref             mcref;
};
struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum cma_state          old_state;
        enum cma_state          new_state;
        struct rdma_cm_event    event;
};
struct cma_ndev_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        struct rdma_cm_event    event;
};
struct iboe_mcast_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        struct cma_multicast    *mc;
};
union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hh {
        u8 bsdh[16];
        u8 sdp_version; /* Major version: 7:4 */
        u8 ip_version;  /* IP version: 7:4 */
        u8 sdp_specific1[10];
        __be16 port;
        __be16 sdp_specific2;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hah {
        u8 bsdh[16];
        u8 sdp_version;
};
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum cma_state comp, enum cma_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
                               enum cma_state exch)
{
        unsigned long flags;
        enum cma_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}
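/*
 * The helpers below pack and unpack the version carried in the high
 * nibble of the ip_version/sdp_version bytes of the private data
 * headers defined above; the low nibble is preserved across updates.
 */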
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
        return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
        return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
        return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
        struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

        kfree(mc->multicast.ib);
        kfree(mc);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}
static int cma_set_qkey(struct rdma_id_private *id_priv)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        if (id_priv->qkey)
                return 0;

        switch (id_priv->id.ps) {
        case RDMA_PS_UDP:
                id_priv->qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(id_priv->id.device,
                                             id_priv->id.port_num, &rec.mgid,
                                             &rec);
                if (!ret)
                        id_priv->qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}
static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
{
        int i;
        int err;
        struct ib_port_attr props;
        union ib_gid tmp;

        err = ib_query_port(device, port_num, &props);
        if (err)
                return 1;

        for (i = 0; i < props.gid_tbl_len; ++i) {
                err = ib_query_gid(device, port_num, i, &tmp);
                if (err)
                        return 1;
                if (!memcmp(&tmp, gid, sizeof tmp))
                        return 0;
        }

        return -EAGAIN;
}
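/*
 * find_gid_port() returns 0 when the GID is present in the port's GID
 * table, 1 on a query error (stop scanning this device), and -EAGAIN
 * when the GID is simply absent (try the next port).  cma_acquire_dev()
 * relies on that convention and is called with the global 'lock' mutex
 * held by its callers.
 */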
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid, iboe_gid;
        int ret = -ENODEV;
        u8 port;
        enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

        iboe_addr_get_sgid(dev_addr, &iboe_gid);
        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);
        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
                                if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
                                    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
                                        ret = find_gid_port(cma_dev->device, &iboe_gid, port);
                                else
                                        ret = find_gid_port(cma_dev->device, &gid, port);

                                if (!ret) {
                                        id_priv->id.port_num = port;
                                        goto out;
                                } else if (ret == 1)
                                        break;
                        }
                }
        }

out:
        if (!ret)
                cma_attach_to_dev(id_priv, cma_dev);

        return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
                                enum cma_state state)
{
        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != state) {
                mutex_unlock(&id_priv->handler_mutex);
                return -EINVAL;
        }
        return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
        return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->state = CMA_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
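/*
 * UD QPs are not connected, so a newly created QP can be moved through
 * INIT -> RTR -> RTS immediately; connected QPs instead wait for CM
 * events and are transitioned by cma_modify_qp_rtr()/cma_modify_qp_rts()
 * further below.
 */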
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (cma_is_ud_ps(id_priv->id.ps))
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        mutex_lock(&id_priv->qp_mutex);
        ib_destroy_qp(id_priv->id.qp);
        id_priv->id.qp = NULL;
        mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                goto out;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
        struct ib_qp_attr qp_attr;
        int ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;
        u16 pkey;

        if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
            IB_LINK_LAYER_INFINIBAND)
                pkey = ib_addr_get_pkey(dev_addr);
        else
                pkey = 0xffff;

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (cma_is_ud_ps(id_priv->id.ps)) {
                ret = cma_set_qkey(id_priv);
                if (ret)
                        return ret;

                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
                break;
        case RDMA_TRANSPORT_IWARP:
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
        struct in6_addr *ip6;

        if (addr->sa_family == AF_INET)
                return ipv4_is_zeronet(
                        ((struct sockaddr_in *)addr)->sin_addr.s_addr);
        else {
                ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
                return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
                        ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        if (addr->sa_family == AF_INET)
                return ipv4_is_loopback(
                        ((struct sockaddr_in *) addr)->sin_addr.s_addr);
        else
                return ipv6_addr_loopback(
                        &((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
        if (addr->sa_family == AF_INET)
                return ((struct sockaddr_in *) addr)->sin_port;
        else
                return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}
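/*
 * Connection requests carry the IP addressing information in the CM
 * private data.  SDP uses its own hello header (struct sdp_hh); all
 * other port spaces use struct cma_hdr.  cma_get_net_info() validates
 * the version field and hands back pointers into the header.
 */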
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
                            u8 *ip_ver, __be16 *port,
                            union cma_ip_addr **src, union cma_ip_addr **dst)
{
        switch (ps) {
        case RDMA_PS_SDP:
                if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
                    SDP_MAJ_VERSION)
                        return -EINVAL;

                *ip_ver = sdp_get_ip_ver(hdr);
                *port   = ((struct sdp_hh *) hdr)->port;
                *src    = &((struct sdp_hh *) hdr)->src_addr;
                *dst    = &((struct sdp_hh *) hdr)->dst_addr;
                break;
        default:
                if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
                        return -EINVAL;

                *ip_ver = cma_get_ip_ver(hdr);
                *port   = ((struct cma_hdr *) hdr)->port;
                *src    = &((struct cma_hdr *) hdr)->src_addr;
                *dst    = &((struct cma_hdr *) hdr)->dst_addr;
                break;
        }

        if (*ip_ver != 4 && *ip_ver != 6)
                return -EINVAL;
        return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
                              struct rdma_addr *listen_addr,
                              u8 ip_ver, __be16 port,
                              union cma_ip_addr *src, union cma_ip_addr *dst)
{
        struct sockaddr_in *listen4, *ip4;
        struct sockaddr_in6 *listen6, *ip6;

        switch (ip_ver) {
        case 4:
                listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
                ip4 = (struct sockaddr_in *) &addr->src_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = dst->ip4.addr;
                ip4->sin_port = listen4->sin_port;

                ip4 = (struct sockaddr_in *) &addr->dst_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = src->ip4.addr;
                ip4->sin_port = port;
                break;
        case 6:
                listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
                ip6 = (struct sockaddr_in6 *) &addr->src_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = dst->ip6;
                ip6->sin6_port = listen6->sin6_port;

                ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = src->ip6;
                ip6->sin6_port = port;
                break;
        default:
                break;
        }
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
        switch (ps) {
        case RDMA_PS_SDP:
                return 0;
        default:
                return sizeof(struct cma_hdr);
        }
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
        case IB_LINK_LAYER_INFINIBAND:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
        default:
                break;
        }
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                /* sync with device removal to avoid duplicate destruction */
                list_del_init(&dev_id_priv->list);
                list_del(&dev_id_priv->listen_list);
                mutex_unlock(&lock);

                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum cma_state state)
{
        switch (state) {
        case CMA_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case CMA_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case CMA_LISTEN:
                if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
                                && !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                idr_remove(bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
                case IB_LINK_LAYER_INFINIBAND:
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
                        break;
                case IB_LINK_LAYER_ETHERNET:
                        kref_put(&mc->mcref, release_mc);
                        break;
                default:
                        break;
                }
        }
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum cma_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, CMA_DESTROYING);
        cma_cancel_operation(id_priv, state);

        mutex_lock(&lock);
        if (id_priv->cma_dev) {
                mutex_unlock(&lock);
                switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                cma_leave_mc_groups(id_priv);
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
        }
        mutex_unlock(&lock);

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        if (id_priv->internal_id)
                cma_deref_id(id_priv->id.context);

        kfree(id_priv->id.route.path_rec);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
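/*
 * On the active side, receiving a REP means the passive side accepted
 * the connection: move the QP through RTR to RTS and acknowledge with
 * an RTU.  Any failure sends a consumer-defined REJ and moves the QP
 * to the error state.
 */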
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(id_priv, NULL);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(id_priv, NULL);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
        if (id_priv->id.ps == RDMA_PS_SDP &&
            sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
            SDP_MAJ_VERSION)
                return -EINVAL;

        return 0;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
                cma_disable_callback(id_priv, CMA_CONNECT)) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
                cma_disable_callback(id_priv, CMA_DISCONNECT)))
                return 0;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                event.status = cma_verify_rep(id_priv, ib_event->private_data);
                if (event.status)
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
                event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
                break;
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
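/*
 * Build a child rdma_cm_id for an incoming connection request,
 * inheriting the listener's handler and context and saving the
 * addressing and path information carried in the REQ private data.
 */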
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        union cma_ip_addr *src, *dst;
        __be16 port;
        u8 ip_ver;
        int ret;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto destroy_id;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
                rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
                rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
                ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
        } else {
                ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
                                        &rt->addr.dev_addr);
                if (ret)
                        goto destroy_id;
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;

destroy_id:
        rdma_destroy_id(id);
err:
        return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        union cma_ip_addr *src, *dst;
        __be16 port;
        u8 ip_ver;
        int ret;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                return NULL;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
                ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
                                        &id->route.addr.dev_addr);
                if (ret)
                        goto err;
        }

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
        int offset, ret;

        listen_id = cm_id->context;
        if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id->id.ps);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (cma_is_ud_ps(listen_id->id.ps)) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret)
                goto release_conn_id;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (!ret) {
                /*
                 * Acquire mutex to prevent user executing rdma_destroy_id()
                 * while we're accessing the cm_id.
                 */
                mutex_lock(&lock);
                if (cma_comp(conn_id, CMA_CONNECT) &&
                    !cma_is_ud_ps(conn_id->id.ps))
                        ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
                mutex_unlock(&lock);
                mutex_unlock(&conn_id->handler_mutex);
                cma_deref_id(conn_id);
                goto out;
        }
        cma_deref_id(conn_id);

        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;

release_conn_id:
        cma_exch(conn_id, CMA_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
        rdma_destroy_id(&conn_id->id);

out:
        mutex_unlock(&listen_id->handler_mutex);
        return ret;
}
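/*
 * cma_get_service_id() packs the port space into the upper bytes of the
 * service ID and the IP port number into the low 16 bits.  For example,
 * with the historical value RDMA_PS_TCP = 0x0106, port 80 maps to
 * service ID 0x0000000001060050 (big-endian on the wire).
 */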
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
        return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
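/*
 * The compare data/mask pair built below lets multiple rdma_cm_ids
 * share one IB CM service ID: the IB CM matches incoming REQs against
 * only the destination address and IP version masked in here, so
 * listeners bound to different local addresses on the same port stay
 * distinct.
 */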
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
                                 struct ib_cm_compare_data *compare)
{
        struct cma_hdr *cma_data, *cma_mask;
        struct sdp_hh *sdp_data, *sdp_mask;
        __be32 ip4_addr;
        struct in6_addr ip6_addr;

        memset(compare, 0, sizeof *compare);
        cma_data = (void *) compare->data;
        cma_mask = (void *) compare->mask;
        sdp_data = (void *) compare->data;
        sdp_mask = (void *) compare->mask;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 4);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip4.addr = ip4_addr;
                        sdp_mask->dst_addr.ip4.addr = htonl(~0);
                } else {
                        cma_set_ip_ver(cma_data, 4);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip4.addr = ip4_addr;
                        cma_mask->dst_addr.ip4.addr = htonl(~0);
                }
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 6);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip6 = ip6_addr;
                        memset(&sdp_mask->dst_addr.ip6, 0xFF,
                               sizeof sdp_mask->dst_addr.ip6);
                } else {
                        cma_set_ip_ver(cma_data, 6);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip6 = ip6_addr;
                        memset(&cma_mask->dst_addr.ip6, 0xFF,
                               sizeof cma_mask->dst_addr.ip6);
                }
                break;
        default:
                break;
        }
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
        struct rdma_id_private *id_priv = iw_id->context;
        struct rdma_cm_event event;
        struct sockaddr_in *sin;
        int ret = 0;

        if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;

        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
                *sin = iw_event->local_addr;
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                *sin = iw_event->remote_addr;
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                        break;
                case -ECONNRESET:
                case -ECONNREFUSED:
                        event.event = RDMA_CM_EVENT_REJECTED;
                        break;
                case -ETIMEDOUT:
                        event.event = RDMA_CM_EVENT_UNREACHABLE;
                        break;
                default:
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                        break;
                }
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        default:
                BUG_ON(1);
        }

        event.status = iw_event->status;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }

        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
{
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct sockaddr_in *sin;
        struct net_device *dev = NULL;
        struct rdma_cm_event event;
        int ret;
        struct ib_device_attr attr;

        listen_id = cm_id->context;
        if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.event_handler,
                                   listen_id->id.context,
                                   RDMA_PS_TCP);
        if (IS_ERR(new_cm_id)) {
                ret = -ENOMEM;
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = CMA_CONNECT;

        dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
                ret = -EADDRNOTAVAIL;
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
        ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        conn_id->cm_id.iw = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;

        sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
        *sin = iw_event->local_addr;
        sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
        *sin = iw_event->remote_addr;

        ret = ib_query_device(conn_id->id.device, &attr);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
        event.param.conn.responder_resources = attr.max_qp_rd_atom;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
                cma_deref_id(conn_id);
                rdma_destroy_id(&conn_id->id);
                goto out;
        }

        mutex_unlock(&conn_id->handler_mutex);
        cma_deref_id(conn_id);

out:
        if (dev)
                dev_put(dev);
        mutex_unlock(&listen_id->handler_mutex);
        return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct ib_cm_compare_data compare_data;
        struct sockaddr *addr;
        __be64 svc_id;
        int ret;

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib))
                return PTR_ERR(id_priv->cm_id.ib);

        addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
        svc_id = cma_get_service_id(id_priv->id.ps, addr);
        if (cma_any_addr(addr))
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
        else {
                cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
        }

        if (ret) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
        int ret;
        struct sockaddr_in *sin;

        id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
                                            iw_conn_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.iw))
                return PTR_ERR(id_priv->cm_id.iw);

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        id_priv->cm_id.iw->local_addr = *sin;

        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

        if (ret) {
                iw_destroy_cm_id(id_priv->cm_id.iw);
                id_priv->cm_id.iw = NULL;
        }

        return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}
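/*
 * Wildcard listens are implemented by creating one internal child id
 * per RDMA device.  Events on a child are funneled through
 * cma_listen_handler() above, which re-points the id at the user's
 * context and handler before delivering the event.
 */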
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = CMA_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
        atomic_inc(&id_priv->refcount);
        dev_id_priv->internal_id = 1;

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
                       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
                ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
                return -EINVAL;

        id_priv->backlog = backlog;
        if (id->device) {
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
                        break;
                case RDMA_TRANSPORT_IWARP:
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
                        break;
                default:
                        ret = -ENOSYS;
                        goto err;
                }
        } else
                cma_listen_on_all(id_priv);

        return 0;
err:
        id_priv->backlog = 0;
        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
        return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
{
        struct cma_work *work = context;
        struct rdma_route *route;

        route = &work->id->id.route;

        if (!status) {
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
                work->old_state = CMA_ROUTE_QUERY;
                work->new_state = CMA_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }

        queue_work(cma_wq, &work->work);
}
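/*
 * Compose the SA path record query for an IB route: source/destination
 * GIDs, pkey and service ID are mandatory components, while the QoS
 * class (IPv4, from the TOS) or traffic class (IPv6, from the flow
 * label) is added depending on the address family.
 */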
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
{
        struct rdma_addr *addr = &id_priv->id.route.addr;
        struct ib_sa_path_rec path_rec;
        ib_sa_comp_mask comp_mask;
        struct sockaddr_in6 *sin6;

        memset(&path_rec, 0, sizeof path_rec);
        rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
        rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
        path_rec.service_id = cma_get_service_id(id_priv->id.ps,
                                                 (struct sockaddr *) &addr->dst_addr);

        comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
                    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

        if (addr->src_addr.ss_family == AF_INET) {
                path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
                comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
        } else {
                sin6 = (struct sockaddr_in6 *) &addr->src_addr;
                path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
                comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
        }

        id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                               id_priv->id.port_num, &path_rec,
                                               comp_mask, timeout_ms,
                                               GFP_KERNEL, cma_query_handler,
                                               work, &id_priv->query);

        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
        struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        mutex_lock(&id_priv->handler_mutex);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
        struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state == CMA_DESTROYING ||
            id_priv->state == CMA_DEVICE_REMOVAL)
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }

out:
        mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct rdma_route *route = &id_priv->id.route;
        struct cma_work *work;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
        if (!route->path_rec) {
                ret = -ENOMEM;
                goto err1;
        }

        ret = cma_query_ib_route(id_priv, timeout_ms, work);
        if (ret)
                goto err2;

        return 0;
err2:
        kfree(route->path_rec);
        route->path_rec = NULL;
err1:
        kfree(work);
        return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
                      struct ib_sa_path_rec *path_rec, int num_paths)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
                return -EINVAL;

        id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
                                     GFP_KERNEL);
        if (!id->route.path_rec) {
                ret = -ENOMEM;
                goto err;
        }

        id->route.num_paths = num_paths;
        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct cma_work *work;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
}
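/*
 * For RDMA over Converged Ethernet (IBoE) there is no SA to query:
 * the path record is synthesized locally by embedding the source and
 * destination MAC/VLAN into GIDs and deriving MTU and rate from the
 * underlying netdevice.
 */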
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
        struct rdma_route *route = &id_priv->id.route;
        struct rdma_addr *addr = &route->addr;
        struct cma_work *work;
        int ret;
        struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
        struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
        struct net_device *ndev = NULL;
        u16 vid;

        if (src_addr->sin_family != dst_addr->sin_family)
                return -EINVAL;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);

        route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
        if (!route->path_rec) {
                ret = -ENOMEM;
                goto err1;
        }

        route->num_paths = 1;

        if (addr->dev_addr.bound_dev_if)
                ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
        if (!ndev) {
                ret = -ENODEV;
                goto err2;
        }

        vid = rdma_vlan_dev_vlan_id(ndev);

        iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
        iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

        route->path_rec->hop_limit = 1;
        route->path_rec->reversible = 1;
        route->path_rec->pkey = cpu_to_be16(0xffff);
        route->path_rec->mtu_selector = IB_SA_EQ;
        route->path_rec->sl = id_priv->tos >> 5;

        route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
        route->path_rec->rate_selector = IB_SA_EQ;
        route->path_rec->rate = iboe_get_rate(ndev);
        dev_put(ndev);
        route->path_rec->packet_life_time_selector = IB_SA_EQ;
        route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
        if (!route->path_rec->mtu) {
                ret = -EINVAL;
                goto err2;
        }

        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        work->event.status = 0;

        queue_work(cma_wq, &work->work);

        return 0;

err2:
        kfree(route->path_rec);
        route->path_rec = NULL;
err1:
        kfree(work);
        return ret;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                switch (rdma_port_get_link_layer(id->device, id->port_num)) {
                case IB_LINK_LAYER_INFINIBAND:
                        ret = cma_resolve_ib_route(id_priv, timeout_ms);
                        break;
                case IB_LINK_LAYER_ETHERNET:
                        ret = cma_resolve_iboe_route(id_priv);
                        break;
                default:
                        ret = -ENOSYS;
                }
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;
        struct ib_port_attr port_attr;
        union ib_gid gid;
        u16 pkey;
        int ret;
        u8 p;

        mutex_lock(&lock);
        if (list_empty(&dev_list)) {
                ret = -ENODEV;
                goto out;
        }
        list_for_each_entry(cma_dev, &dev_list, list)
                for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
                        if (!ib_query_port(cma_dev->device, p, &port_attr) &&
                            port_attr.state == IB_PORT_ACTIVE)
                                goto port_found;

        p = 1;
        cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
        if (ret)
                goto out;

        ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
        if (ret)
                goto out;

        id_priv->id.route.addr.dev_addr.dev_type =
                (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
                ARPHRD_INFINIBAND : ARPHRD_ETHER;

        rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
        id_priv->id.port_num = p;
        cma_attach_to_dev(id_priv, cma_dev);
out:
        mutex_unlock(&lock);
        return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *dev_addr, void *context)
{
        struct rdma_id_private *id_priv = context;
        struct rdma_cm_event event;

        memset(&event, 0, sizeof event);
        mutex_lock(&id_priv->handler_mutex);

        /*
         * Grab mutex to block rdma_destroy_id() from removing the device while
         * we're trying to acquire it.
         */
        mutex_lock(&lock);
        if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
                mutex_unlock(&lock);
                goto out;
        }

        if (!status && !id_priv->cma_dev)
                status = cma_acquire_dev(id_priv);
        mutex_unlock(&lock);

        if (status) {
                if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
                        goto out;
                event.event = RDMA_CM_EVENT_ADDR_ERROR;
                event.status = status;
        } else {
                memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                       ip_addr_size(src_addr));
                event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        }

        if (id_priv->id.event_handler(&id_priv->id, &event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
                return;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
        struct cma_work *work;
        struct sockaddr *src, *dst;
        union ib_gid gid;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        if (!id_priv->cma_dev) {
                ret = cma_bind_loopback(id_priv);
                if (ret)
                        goto err;
        }

        rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

        src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
        if (cma_zero_addr(src)) {
                dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
                if ((src->sa_family = dst->sa_family) == AF_INET) {
                        ((struct sockaddr_in *) src)->sin_addr.s_addr =
                                ((struct sockaddr_in *) dst)->sin_addr.s_addr;
                } else {
                        ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
                                       &((struct sockaddr_in6 *) dst)->sin6_addr);
                }
        }

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
err:
        kfree(work);
        return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         struct sockaddr *dst_addr)
{
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
                        ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
                                ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
                }
        }
        return rdma_bind_addr(id, src_addr);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                      struct sockaddr *dst_addr, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
        if (cma_any_addr(dst_addr))
                ret = cma_resolve_loopback(id_priv);
        else
                ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
                                      dst_addr, &id->route.addr.dev_addr,
                                      timeout_ms, addr_handler, id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
static void cma_bind_port(struct rdma_bind_list *bind_list,
                          struct rdma_id_private *id_priv)
{
        struct sockaddr_in *sin;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        sin->sin_port = htons(bind_list->port);
        id_priv->bind_list = bind_list;
        hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
                          unsigned short snum)
{
        struct rdma_bind_list *bind_list;
        int port, ret;

        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;

        do {
                ret = idr_get_new_above(ps, bind_list, snum, &port);
        } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

        if (ret)
                goto err1;

        if (port != snum) {
                ret = -EADDRNOTAVAIL;
                goto err2;
        }

        bind_list->ps = ps;
        bind_list->port = (unsigned short) port;
        cma_bind_port(bind_list, id_priv);
        return 0;
err2:
        idr_remove(ps, port);
err1:
        kfree(bind_list);
        return ret;
}
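/*
 * Ephemeral port selection below mirrors the IP stack: pick a random
 * rover within the local port range, avoid immediately re-using the
 * last allocated port, and walk the range until an unused port is
 * found.
 */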
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        static unsigned int last_used_port;
        int low, high, remaining;
        unsigned int rover;

        inet_get_local_port_range(&low, &high);
        remaining = (high - low) + 1;
        rover = net_random() % remaining + low;
retry:
        if (last_used_port != rover &&
            !idr_find(ps, (unsigned short) rover)) {
                int ret = cma_alloc_port(ps, id_priv, rover);
                /*
                 * Remember previously used port number in order to avoid
                 * re-using same port immediately after it is closed.
                 */
                if (!ret)
                        last_used_port = rover;
                if (ret != -EADDRNOTAVAIL)
                        return ret;
        }
        if (--remaining) {
                rover++;
                if ((rover < low) || (rover > high))
                        rover = low;
                goto retry;
        }
        return -EADDRNOTAVAIL;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        struct rdma_id_private *cur_id;
        struct sockaddr_in *sin, *cur_sin;
        struct rdma_bind_list *bind_list;
        struct hlist_node *node;
        unsigned short snum;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        snum = ntohs(sin->sin_port);
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;

        bind_list = idr_find(ps, snum);
        if (!bind_list)
                return cma_alloc_port(ps, id_priv, snum);

        /*
         * We don't support binding to any address if anyone is bound to
         * a specific address on the same port.
         */
        if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
                return -EADDRNOTAVAIL;

        hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
                if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
                        return -EADDRNOTAVAIL;

                cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
                if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
                        return -EADDRINUSE;
        }

        cma_bind_port(bind_list, id_priv);
        return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
        struct idr *ps;
        int ret;

        switch (id_priv->id.ps) {
        case RDMA_PS_SDP:
                ps = &sdp_ps;
                break;
        case RDMA_PS_TCP:
                ps = &tcp_ps;
                break;
        case RDMA_PS_UDP:
                ps = &udp_ps;
                break;
        case RDMA_PS_IPOIB:
                ps = &ipoib_ps;
                break;
        default:
                return -EPROTONOSUPPORT;
        }

        mutex_lock(&lock);
        if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
                ret = cma_alloc_any_port(ps, id_priv);
        else
                ret = cma_use_port(ps, id_priv);
        mutex_unlock(&lock);

        return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
                               struct sockaddr *addr)
{
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct sockaddr_in6 *sin6;

        if (addr->sa_family != AF_INET6)
                return 0;

        sin6 = (struct sockaddr_in6 *) addr;
        if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
            !sin6->sin6_scope_id)
                return -EINVAL;

        dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
        return 0;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
        struct rdma_id_private *id_priv;
        int ret;

        if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
                return -EAFNOSUPPORT;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
                return -EINVAL;

        ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
        if (ret)
                goto err1;

        if (!cma_any_addr(addr)) {
                ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
                if (ret)
                        goto err1;

                mutex_lock(&lock);
                ret = cma_acquire_dev(id_priv);
                mutex_unlock(&lock);
                if (ret)
                        goto err1;
        }

        memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
        ret = cma_get_port(id_priv);
        if (ret)
                goto err2;

        return 0;
err2:
        if (id_priv->cma_dev) {
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
                mutex_unlock(&lock);
        }
err1:
        cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
        return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
                          struct rdma_route *route)
{
        struct cma_hdr *cma_hdr;
        struct sdp_hh *sdp_hdr;

        if (route->addr.src_addr.ss_family == AF_INET) {
                struct sockaddr_in *src4, *dst4;

                src4 = (struct sockaddr_in *) &route->addr.src_addr;
                dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

                switch (ps) {
                case RDMA_PS_SDP:
                        sdp_hdr = hdr;
                        if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
                                return -EINVAL;
                        sdp_set_ip_ver(sdp_hdr, 4);
                        sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                        sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                        sdp_hdr->port = src4->sin_port;
                        break;
                default:
                        cma_hdr = hdr;
                        cma_hdr->cma_version = CMA_VERSION;
                        cma_set_ip_ver(cma_hdr, 4);
                        cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                        cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                        cma_hdr->port = src4->sin_port;
                        break;
                }
        } else {
                struct sockaddr_in6 *src6, *dst6;

                src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
                dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

                switch (ps) {
                case RDMA_PS_SDP:
                        sdp_hdr = hdr;
                        if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
                                return -EINVAL;
                        sdp_set_ip_ver(sdp_hdr, 6);
                        sdp_hdr->src_addr.ip6 = src6->sin6_addr;
                        sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
                        sdp_hdr->port = src6->sin6_port;
                        break;
                default:
                        cma_hdr = hdr;
                        cma_hdr->cma_version = CMA_VERSION;
                        cma_set_ip_ver(cma_hdr, 6);
                        cma_hdr->src_addr.ip6 = src6->sin6_addr;
                        cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
                        cma_hdr->port = src6->sin6_port;
                        break;
                }
        }
        return 0;
}
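/*
 * UD port spaces resolve the remote QP with the IB CM's Service ID
 * Resolution (SIDR) protocol instead of a full connection handshake;
 * the handler below turns a SIDR REP into an address handle, QPN and
 * Q_Key for the user.
 */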
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
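/*
 * Send a SIDR REQ to resolve the remote UD QP.  Note the timeout
 * conversion below: IB CM timeouts are encoded as 4.096us * 2^n, and
 * since 4.096us is roughly 2^-8 ms, 1 << (CMA_CM_RESPONSE_TIMEOUT - 8)
 * approximates the same interval in milliseconds (about 4 seconds for
 * the default of 20).
 */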
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
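/*
 * Build and send an IB CM REQ for a connected (RC) QP.  The consumer's
 * private data is copied in after the cma/sdp header, and an alternate
 * path is supplied when route resolution returned two path records.
 */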
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
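/*
 * iWARP connect path: create an iw_cm_id, seed it with the resolved
 * source/destination sockaddrs, move the QP to RTR, then hand the
 * connection parameters to the iWARP CM.
 */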
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
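/*
 * rdma_connect() is valid only in the CMA_ROUTE_RESOLVED state and
 * dispatches by transport: IB uses SIDR resolution for UD port spaces
 * and a CM REQ otherwise, while iWARP goes through iw_cm_connect().
 * On failure the id is returned to CMA_ROUTE_RESOLVED so the caller
 * may retry.
 */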
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
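/*
 * Accept an inbound IB connection request: transition the QP through
 * RTR/RTS, then answer the REQ with a CM REP built from the consumer's
 * connection parameters.
 */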
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
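/*
 * rdma_accept() runs in the CMA_CONNECT state reached when a connect
 * request event was delivered.  UD port spaces answer with a SIDR REP;
 * connected IB ids send a REP (or, with no conn_param, just complete a
 * REP that was already received).  Any failure rejects the connection
 * so the peer is not left waiting.
 */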
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
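/*
 * Completion handler for multicast joins.  On success the group is
 * attached to the id's QP (if any) and a MULTICAST_JOIN event carrying
 * the AH attributes, the multicast QPN (0xFFFFFF), and qkey is
 * delivered; failures surface as MULTICAST_ERROR.
 */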
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
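/*
 * Derive the IB MGID for a join request: the wildcard address maps to
 * the zero MGID, an IPv6 address whose top bytes already match an
 * SA-assigned MGID prefix is used verbatim, and anything else goes
 * through the IPoIB-style IP-to-MGID translation, with byte 7 stamped
 * with the RDMA CM signature for the UDP port space.
 */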
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
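/*
 * IBoE flavor of the MGID mapping: IPv6 addresses are taken as the
 * MGID directly, while IPv4 groups are embedded in an ff0e::ffff:<addr>
 * style GID.
 */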
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
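/*
 * Ethernet (IBoE) joins have no SA to query, so the mcmember record is
 * synthesized locally (pkey, qkey, rate, and MTU taken from the bound
 * netdev) and the join is completed asynchronously from a work item
 * that invokes cma_ib_mc_handler() with success status.
 */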
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}

	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
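/*
 * Remove a multicast address previously joined on this id: detach the
 * group from the QP and release the join, either via the SA (native IB)
 * or by dropping the local reference (IBoE).
 */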
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
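/*
 * ib_client add callback: set up per-device state and replay every
 * wildcard listen onto the new device so existing listeners can accept
 * connections through it.
 */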
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
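/*
 * Module init: the single-threaded workqueue serializes deferred CMA
 * work, and the netdev notifier plus the address and SA clients are
 * registered before ib_register_client() so that every callback they
 * depend on is in place by the time devices are discovered.
 */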
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);