/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2008 Cisco.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};
/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we take a reference on the
 * ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */
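/*
 * Illustrative sketch (not from the original file): the pairing the
 * comment above describes, using the calls that appear further down.
 *
 *	ib_umad_add_one():    kref_init(&umad_dev->ref);       module's ref
 *	ib_umad_open():       kref_get(&port->umad_dev->ref);  per open file
 *	ib_umad_close():      kref_put(..., ib_umad_release_dev);
 *	ib_umad_remove_one(): kref_put(..., ib_umad_release_dev);
 *
 * so ib_umad_release_dev() runs, and the ib_umad_device is freed, only
 * after both the module's reference and every open file are gone.
 */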
struct ib_umad_port {
	struct cdev           cdev;
	struct device	      *dev;

	struct cdev           sm_cdev;
	struct device	      *sm_dev;
	struct semaphore       sm_sem;

	struct mutex	       file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};
struct ib_umad_file {
	struct mutex		mutex;
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	send_list;
	struct list_head	port_list;
	spinlock_t		send_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
	u8			use_pkey_index;
	u8			already_used;
};
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head	list;
	int			length;
	struct ib_user_mad	mad;
};
static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}
static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}
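/*
 * For illustration (the layouts live in <rdma/ib_user_mad.h>, not
 * here): struct ib_user_mad_hdr is the old header plus a trailing
 * P_Key index field, so a file that has issued
 * IB_USER_MAD_ENABLE_PKEY exchanges a slightly larger header with
 * userspace than an old-ABI file does.
 */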
/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}
static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	    = 0;
	packet->mad.hdr.length	    = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	    = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	    = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	    = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index     = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit     = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message.  Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}
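/*
 * Worked example for the RMPP loop above, assuming an SA-class MAD
 * (data offset 56 bytes): the first copy transfers a full 256-byte
 * MAD, max_seg_payload is 256 - 56 = 200, so a packet->length of 656
 * is returned as 256 bytes followed by two further segments of 200
 * payload bytes each.
 */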
static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}
static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}
static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad((struct ib_mad *) hdr)) {
			if (!ib_response_mad((struct ib_mad *) sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	  = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	} else {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	}

	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms	= packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0]	= packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	spin_lock_irq(&file->send_lock);
	ret = is_duplicate(file, packet);
	if (!ret)
		list_add_tail(&packet->list, &file->send_list);
	spin_unlock_irq(&file->send_lock);
	if (ret) {
		ret = -EINVAL;
		goto err_msg;
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			printk(KERN_WARNING "user_mad: process %s did not enable "
			       "P_Key index support.\n", current->comm);
			printk(KERN_WARNING "user_mad:   Documentation/infiniband/user_mad.txt "
			       "has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
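/*
 * Example of the compat method_mask conversion above: a 32-bit caller
 * hands in the mask as __u32 words, which are folded pairwise into the
 * kernel's 64-bit longs, e.g. umm[] = { 0x1, 0x0, 0x4, 0x0 } becomes
 * req.method_mask[] = { 0x1, 0x4 } on a 64-bit kernel.
 */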
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
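/*
 * Hedged userspace sketch (not part of this driver; error handling
 * omitted) of how these ioctls are typically driven:
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn                = 1,	// GSI; 0 would mean SMI
 *		.mgmt_class         = 0x03,	// e.g. subnet admin (SA)
 *		.mgmt_class_version = 2,
 *	};
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *	ioctl(fd, IB_USER_MAD_ENABLE_PKEY);
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);
 *	// req.id now holds the agent slot used by read()/write()
 */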
#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
	if (port)
		kref_get(&port->umad_dev->ref);
	else
		return -ENXIO;

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	ret = nonseekable_open(inode, filp);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}
static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl	= ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close,
	.llseek		= no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
	if (port)
		kref_get(&port->umad_dev->ref);
	else
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return nonseekable_open(inode, filp);

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}
static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek	 = no_llseek,
};
static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_MAD_ABI_VERSION));
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(void)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
					  "infiniband_mad");
		if (ret) {
			printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
	if (ret >= IB_UMAD_MAX_PORTS)
		return -1;

	return ret;
}
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base;

	spin_lock(&port_lock);
	devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			return -1;

		spin_lock(&port_lock);
		port->dev_num = devnum + IB_UMAD_MAX_PORTS;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		port->dev_num = devnum;
		base = devnum + base_dev;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;
	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
	if (cdev_add(&port->cdev, base, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev.dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	base += IB_UMAD_MAX_PORTS;
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;
	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
	if (cdev_add(&port->sm_cdev, base, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev.dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev.dev);

err_sm_cdev:
	cdev_del(&port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev.dev);

err_cdev:
	cdev_del(&port->cdev);
	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

	return -1;
}
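/*
 * Note on the minor-number layout built above: each port takes two
 * minors from one region, umad%d at base + dev_num and issm%d at
 * base + dev_num + IB_UMAD_MAX_PORTS, which is why every
 * *_chrdev_region() call in this file sizes its region as
 * IB_UMAD_MAX_PORTS * 2.
 */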
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev.dev);
	device_destroy(umad_class, port->sm_cdev.dev);

	cdev_del(&port->cdev);
	cdev_del(&port->sm_cdev);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(port->dev_num, dev_map);
	else
		clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
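/*
 * With this devnode hook in place, the "umad%d" and "issm%d" class
 * devices created in ib_umad_init_port() appear under /dev/infiniband/
 * (e.g. /dev/infiniband/umad0) rather than directly under /dev.
 */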
static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	umad_class->devnode = umad_devnode;

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
module_exit(ib_umad_cleanup);