/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
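/*
 * Note (added commentary, not from the original author): the response
 * timeout above is in IB CM units of 4.096 usec * 2^n, so 20 is roughly
 * 4.3 seconds per exchange; the SIDR request path below derives its
 * millisecond timeout from the same constant as 1 << (timeout - 8).
 * The MRA setting asks the remote CM to extend its timeout while a
 * connection request is being processed.
 */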
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};
static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};
struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;

	struct mutex		handler_mutex;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};
struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};
union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};
struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
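/*
 * State-machine helpers: cma_comp() tests the id's current state,
 * cma_comp_exch() atomically compares-and-exchanges it, and cma_exch()
 * swaps it unconditionally, all under id_priv->lock.
 */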
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
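/*
 * Illustrative only: a minimal active-side call sequence for this API,
 * with error handling omitted.  my_handler, my_ctx, dst, pd, init_attr
 * and param are caller-supplied placeholders, not names defined here:
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst, 2000);
 *	(handler receives RDMA_CM_EVENT_ADDR_RESOLVED)
 *	rdma_resolve_route(id, 2000);
 *	(handler receives RDMA_CM_EVENT_ROUTE_RESOLVED)
 *	rdma_create_qp(id, pd, &init_attr);
 *	rdma_connect(id, &param);
 */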
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
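/*
 * The cma_modify_qp_*() helpers walk the user's QP through the
 * INIT -> RTR -> RTS states (or into ERR) under qp_mutex; each is a
 * no-op returning success when no QP is associated with the id.
 */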
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
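/*
 * Address classification helpers: a zeronet or loopback IPv4 address, or
 * an all-zero IPv6 address, is treated as a wildcard ("any") address,
 * which selects the listen-on-all-devices and loopback-binding paths
 * further below.
 */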
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}
static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
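/*
 * cma_rep_recv() completes the active side of connection establishment:
 * it moves the QP to RTR and then RTS and acknowledges the reply with an
 * RTU.  Any failure flushes the QP and rejects the connection.
 */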
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
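/*
 * IB CM callback: translates ib_cm events into rdma_cm events for the
 * user.  By convention, a non-zero return from the user's event handler
 * destroys the id, so the cm_id pointer is cleared first to keep the IB
 * CM from destroying it a second time.
 */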
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto destroy_id;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
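/*
 * The service ID packs the port space and port number into a 64-bit
 * big-endian value: sid = cpu_to_be64(((u64) ps << 16) + port).  As an
 * illustration, RDMA_PS_TCP (0x0106) with port 5000 (0x1388) yields
 * 0x0000000001061388.
 */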
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
}
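/*
 * Since all specific-address listens in a port space share a service ID,
 * each attaches private data compare masks that match on the destination
 * IP carried in the cma/sdp header, letting the IB CM demultiplex
 * incoming requests per address.
 */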
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
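/*
 * Listen setup: a wildcard address listens on the bare service ID, while
 * a specific address also installs the compare data built above so only
 * matching connection requests are delivered to this listener.
 */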
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
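/*
 * The TOS value stored here is consumed during route resolution: for IPv4
 * it becomes the path record QoS class, while for IPv6 the traffic class
 * is taken from the source address flow info instead (see
 * cma_query_ib_route()).
 */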
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.sa_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
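/*
 * Port management: each port space keeps an idr mapping a port number to
 * a struct rdma_bind_list, whose owners hlist holds every id bound to
 * that port.  The bind list is freed when its last owner releases the
 * port (see cma_release_port() above).
 */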
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization, as in inet_csk_get_port() */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}
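/*
 * UD (SIDR) flow: instead of a full connection, the active side sends a
 * SIDR request, and the reply carries the remote QPN and qkey, from which
 * an address handle is built and reported to the user with
 * RDMA_CM_EVENT_ESTABLISHED.
 */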
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
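/*
 * Illustrative only: one plausible way a caller could fill the connection
 * parameters before calling rdma_connect(); the values shown are
 * arbitrary placeholders, not recommendations:
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources = 1,
 *		.initiator_depth = 1,
 *		.retry_count = 7,
 *		.rnr_retry_count = 7,
 *	};
 *	ret = rdma_connect(id, &param);
 */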
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
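
/*
 * A minimal caller sketch (illustrative only; "id" is assumed to have
 * reached the route-resolved state via rdma_resolve_route(), and the QP
 * and event handler are set up elsewhere by the consumer):
 *
 *	struct rdma_conn_param param;
 *	int ret;
 *
 *	memset(&param, 0, sizeof param);
 *	param.responder_resources = 1;
 *	param.initiator_depth = 1;
 *	param.retry_count = 7;
 *	param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &param);
 *
 * Completion is reported asynchronously through the id's event handler
 * as RDMA_CM_EVENT_ESTABLISHED or an error event.
 */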
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
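
/*
 * The REP mirrors the consumer's conn_param values directly; the core does
 * not clamp responder_resources/initiator_depth against what the REQ
 * offered, so the consumer is expected to have done so.
 */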
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
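
/*
 * SIDR serves the unconnected (UD) port spaces: there is no connection to
 * establish, so a successful reply simply hands the remote side our QP
 * number and Q_Key; for any other status those fields stay zeroed by the
 * memset above.
 */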
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
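
/*
 * Acceptance is normally done from the consumer's event handler, on the
 * child id delivered with RDMA_CM_EVENT_CONNECT_REQUEST. A sketch
 * (illustrative only; the handler, its "id" argument and "param" belong
 * to the consumer):
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST:
 *		memset(&param, 0, sizeof param);
 *		param.responder_resources = 1;
 *		param.initiator_depth = 1;
 *		ret = rdma_accept(id, &param);
 *		break;
 */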
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
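
/*
 * For the UD port spaces a reject is expressed as a SIDR reply carrying
 * IB_SIDR_REJECT; for connected IB it becomes a consumer-defined REJ, and
 * iWARP has a native reject verb.
 */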
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
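
/*
 * ib_send_cm_dreq() fails if a DREQ has already been received, in which
 * case we are the responder and answer with a DREP instead; either way the
 * QP has already been forced into the error state above.
 */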
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
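
/*
 * The 0xFFFFFF QP number reported with RDMA_CM_EVENT_MULTICAST_JOIN is the
 * IB multicast QPN: UD sends to the group must target this value rather
 * than a specific remote QP.
 */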
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
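
/*
 * For IPv4 (and non-SA-assigned IPv6) groups the MGID is derived from the
 * IP multicast address by ip_ib_mc_map(), with the broadcast GID supplying
 * the partition information; byte 7 is overwritten with an RDMA CM
 * signature for RDMA_PS_UDP so CM-mapped groups do not collide with plain
 * IPoIB ones.
 */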
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
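
/*
 * A join sketch (illustrative only; the bound id and "my_context" are the
 * consumer's, and in_aton() comes from <linux/inet.h>):
 *
 *	struct sockaddr_in mcast;
 *	int ret;
 *
 *	memset(&mcast, 0, sizeof mcast);
 *	mcast.sin_family = AF_INET;
 *	mcast.sin_addr.s_addr = in_aton("239.1.1.1");
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &mcast, my_context);
 *
 * The join completes asynchronously via RDMA_CM_EVENT_MULTICAST_JOIN.
 */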
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->src_dev == ndev) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
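
/*
 * Each id is taken off the device's list with a reference held, notified
 * of the removal (internal listen ids are simply destroyed), and then
 * released; the final cma_deref_dev()/wait_for_completion() pair blocks
 * until every reference obtained through this device is gone.
 */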
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
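
/*
 * cma_cleanup() below unwinds in the same order: the IB client, netdev
 * notifier and address/SA clients are unregistered before the workqueue is
 * destroyed, since their callbacks may still queue work until then.
 */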
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}
module_init(cma_init);
module_exit(cma_cleanup);