IB/uverbs: Fix reference counting usage of event files
drivers/infiniband/core/uverbs_cmd.c

/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
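
/*
 * For example, a typical reader of a QP follows the pattern used by
 * the query/modify handlers below:
 *
 *	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
 *	if (!qp)
 *		return -EINVAL;
 *	... use qp with uobj->mutex held for reading ...
 *	put_qp_read(qp);
 */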

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
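
/*
 * Allocate an idr entry for uobj: preload outside the spinlock so that
 * the GFP_NOWAIT allocation under ib_uverbs_idr_lock cannot fail for
 * lack of preallocated memory.
 */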
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_device_attr             dev_attr;
#endif
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	ret = ib_query_device(ibdev, &dev_attr);
	if (ret)
		goto err_free;
	if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;
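
	/*
	 * fd_install() publishes the async event file to userspace only
	 * after the response has been copied out, so the error paths
	 * above can still tear the file down without racing on the fd.
	 */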
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver			= attr->fw_ver;
	resp->node_guid			= file->device->ib_dev->node_guid;
	resp->sys_image_guid		= attr->sys_image_guid;
	resp->max_mr_size		= attr->max_mr_size;
	resp->page_size_cap		= attr->page_size_cap;
	resp->vendor_id			= attr->vendor_id;
	resp->vendor_part_id		= attr->vendor_part_id;
	resp->hw_ver			= attr->hw_ver;
	resp->max_qp			= attr->max_qp;
	resp->max_qp_wr			= attr->max_qp_wr;
	resp->device_cap_flags		= attr->device_cap_flags;
	resp->max_sge			= attr->max_sge;
	resp->max_sge_rd		= attr->max_sge_rd;
	resp->max_cq			= attr->max_cq;
	resp->max_cqe			= attr->max_cqe;
	resp->max_mr			= attr->max_mr;
	resp->max_pd			= attr->max_pd;
	resp->max_qp_rd_atom		= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom		= attr->max_ee_rd_atom;
	resp->max_res_rd_atom		= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= file->device->ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, &resp, &attr);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state           = attr.state;
	resp.max_mtu         = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid             = attr.lid;
	resp.sm_lid          = attr.sm_lid;
	resp.lmc             = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl           = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device   = file->device->ib_dev;
	pd->uobject  = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	struct ib_pd               *pd;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
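
/*
 * XRC domains are shared between processes by opening the same file;
 * the per-device rb-tree below maps an inode to the ib_xrcd created
 * for it, keyed by inode pointer.
 */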
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_device_attr attr;

		ret = ib_query_device(pd->device, &attr);
		if (ret || !(attr.device_cap_flags &
				IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int                                       ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
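
/*
 * create_cq() is the common core of the plain and extended create-CQ
 * commands: cmd_sz says how much of ib_uverbs_ex_create_cq the caller
 * actually filled in, and the cb hook copies the right-sized response
 * back to userspace for each ABI.
 */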
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object               *obj;
	struct ib_uverbs_event_file        *ev_file = NULL;
	struct ib_cq                       *cq;
	int                                 ret;
	struct ib_uverbs_ex_create_cq_resp  resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
					     file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq   cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle  = cmd.user_handle;
	cmd_ex.cqe          = cmd.cqe;
	cmd_ex.comp_vector  = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq      cmd;
	struct ib_ucq_object              *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
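
/*
 * Convert a kernel ib_wc into the fixed-layout ib_uverbs_wc ABI
 * struct; every field, including tmp.reserved, is written so that no
 * uninitialized kernel stack data reaches userspace.
 */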
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id          = wc->wr_id;
	tmp.status         = wc->status;
	tmp.opcode         = wc->opcode;
	tmp.vendor_err     = wc->vendor_err;
	tmp.byte_len       = wc->byte_len;
	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num         = wc->qp->qp_num;
	tmp.src_qp         = wc->src_qp;
	tmp.wc_flags       = wc->wc_flags;
	tmp.pkey_index     = wc->pkey_index;
	tmp.slid           = wc->slid;
	tmp.sl             = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num       = wc->port_num;
	tmp.reserved       = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	struct ib_uverbs_event_file     *ev_file;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device               *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd                 *xrcd = NULL;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);
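
	/* For XRC target QPs, cmd.pd_handle carries the XRCD handle instead. */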
	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd          = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp       = qp;
		qp->device        = device;
		qp->pd            = pd;
		qp->send_cq       = attr.send_cq;
		qp->recv_cq       = attr.recv_cq;
		qp->srq           = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context    = attr.qp_context;
		qp->qp_type       = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid          = attr->ah_attr.dlid;
	resp.dest.sl            = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate   = attr->ah_attr.static_rate;
	resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num      = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr     = init_attr->cap.max_send_wr;
	resp.max_recv_wr     = init_attr->cap.max_recv_wr;
	resp.max_send_sge    = init_attr->cap.max_send_sge;
	resp.max_recv_sge    = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid              = cmd.dest.dlid;
	attr->ah_attr.sl                = cmd.dest.sl;
	attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate       = cmd.dest.static_rate;
	attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num          = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
2225 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2226 const char __user *buf, int in_len,
2227 int out_len)
2229 struct ib_uverbs_destroy_qp cmd;
2230 struct ib_uverbs_destroy_qp_resp resp;
2231 struct ib_uobject *uobj;
2232 struct ib_qp *qp;
2233 struct ib_uqp_object *obj;
2234 int ret = -EINVAL;
2236 if (copy_from_user(&cmd, buf, sizeof cmd))
2237 return -EFAULT;
2239 memset(&resp, 0, sizeof resp);
2241 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2242 if (!uobj)
2243 return -EINVAL;
2244 qp = uobj->object;
2245 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2247 if (!list_empty(&obj->mcast_list)) {
2248 put_uobj_write(uobj);
2249 return -EBUSY;
2250 }
2252 ret = ib_destroy_qp(qp);
2253 if (!ret)
2254 uobj->live = 0;
2256 put_uobj_write(uobj);
2258 if (ret)
2259 return ret;
2261 if (obj->uxrcd)
2262 atomic_dec(&obj->uxrcd->refcnt);
2264 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2266 mutex_lock(&file->mutex);
2267 list_del(&uobj->list);
2268 mutex_unlock(&file->mutex);
2270 ib_uverbs_release_uevent(file, &obj->uevent);
2272 resp.events_reported = obj->uevent.events_reported;
2274 put_uobj(uobj);
2276 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2277 &resp, sizeof resp))
2278 return -EFAULT;
2280 return in_len;
2281 }
2283 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2284 const char __user *buf, int in_len,
2285 int out_len)
2286 {
2287 struct ib_uverbs_post_send cmd;
2288 struct ib_uverbs_post_send_resp resp;
2289 struct ib_uverbs_send_wr *user_wr;
2290 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2291 struct ib_qp *qp;
2292 int i, sg_ind;
2293 int is_ud;
2294 ssize_t ret = -EINVAL;
2296 if (copy_from_user(&cmd, buf, sizeof cmd))
2297 return -EFAULT;
2299 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2300 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2301 return -EINVAL;
2303 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2304 return -EINVAL;
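/*
 * Annotation: the command buffer layout implied by the checks above is
 * the ib_uverbs_post_send header, then wr_count work requests of
 * wqe_size bytes each, then sge_count ib_uverbs_sge entries.  Note
 * that the size arithmetic is done on unsigned 32-bit values and is
 * not checked for overflow here.
 */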
2306 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2307 if (!user_wr)
2308 return -ENOMEM;
2310 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2311 if (!qp)
2312 goto out;
2314 is_ud = qp->qp_type == IB_QPT_UD;
2315 sg_ind = 0;
2316 last = NULL;
2317 for (i = 0; i < cmd.wr_count; ++i) {
2318 if (copy_from_user(user_wr,
2319 buf + sizeof cmd + i * cmd.wqe_size,
2320 cmd.wqe_size)) {
2321 ret = -EFAULT;
2322 goto out_put;
2323 }
2325 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2326 ret = -EINVAL;
2327 goto out_put;
2328 }
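/*
 * Annotation: each work request and its scatter/gather list come from
 * a single allocation; the sg_list is placed right after the
 * ib_send_wr at an offset aligned to sizeof(struct ib_sge) (see the
 * sg_list assignment further down).
 */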
2330 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2331 user_wr->num_sge * sizeof (struct ib_sge),
2332 GFP_KERNEL);
2333 if (!next) {
2334 ret = -ENOMEM;
2335 goto out_put;
2336 }
2338 if (!last)
2339 wr = next;
2340 else
2341 last->next = next;
2342 last = next;
2344 next->next = NULL;
2345 next->wr_id = user_wr->wr_id;
2346 next->num_sge = user_wr->num_sge;
2347 next->opcode = user_wr->opcode;
2348 next->send_flags = user_wr->send_flags;
2350 if (is_ud) {
2351 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2352 file->ucontext);
2353 if (!next->wr.ud.ah) {
2354 ret = -EINVAL;
2355 goto out_put;
2356 }
2357 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
2358 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2359 if (next->opcode == IB_WR_SEND_WITH_IMM)
2360 next->ex.imm_data =
2361 (__be32 __force) user_wr->ex.imm_data;
2362 } else {
2363 switch (next->opcode) {
2364 case IB_WR_RDMA_WRITE_WITH_IMM:
2365 next->ex.imm_data =
2366 (__be32 __force) user_wr->ex.imm_data;
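/* fall through: RDMA_WRITE_WITH_IMM also needs remote_addr/rkey */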
2367 case IB_WR_RDMA_WRITE:
2368 case IB_WR_RDMA_READ:
2369 next->wr.rdma.remote_addr =
2370 user_wr->wr.rdma.remote_addr;
2371 next->wr.rdma.rkey =
2372 user_wr->wr.rdma.rkey;
2373 break;
2374 case IB_WR_SEND_WITH_IMM:
2375 next->ex.imm_data =
2376 (__be32 __force) user_wr->ex.imm_data;
2377 break;
2378 case IB_WR_SEND_WITH_INV:
2379 next->ex.invalidate_rkey =
2380 user_wr->ex.invalidate_rkey;
2381 break;
2382 case IB_WR_ATOMIC_CMP_AND_SWP:
2383 case IB_WR_ATOMIC_FETCH_AND_ADD:
2384 next->wr.atomic.remote_addr =
2385 user_wr->wr.atomic.remote_addr;
2386 next->wr.atomic.compare_add =
2387 user_wr->wr.atomic.compare_add;
2388 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2389 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2390 break;
2391 default:
2392 break;
2393 }
2394 }
2396 if (next->num_sge) {
2397 next->sg_list = (void *) next +
2398 ALIGN(sizeof *next, sizeof (struct ib_sge));
2399 if (copy_from_user(next->sg_list,
2400 buf + sizeof cmd +
2401 cmd.wr_count * cmd.wqe_size +
2402 sg_ind * sizeof (struct ib_sge),
2403 next->num_sge * sizeof (struct ib_sge))) {
2404 ret = -EFAULT;
2405 goto out_put;
2406 }
2407 sg_ind += next->num_sge;
2408 } else
2409 next->sg_list = NULL;
2410 }
2412 resp.bad_wr = 0;
2413 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2414 if (ret)
2415 for (next = wr; next; next = next->next) {
2416 ++resp.bad_wr;
2417 if (next == bad_wr)
2418 break;
2419 }
2421 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2422 &resp, sizeof resp))
2423 ret = -EFAULT;
2425 out_put:
2426 put_qp_read(qp);
2428 while (wr) {
2429 if (is_ud && wr->wr.ud.ah)
2430 put_ah_read(wr->wr.ud.ah);
2431 next = wr->next;
2432 kfree(wr);
2433 wr = next;
2434 }
2436 out:
2437 kfree(user_wr);
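/*
 * Annotation: per the write()-based uverbs convention, success is
 * reported by returning in_len so the whole command is seen as
 * consumed; otherwise the posting or validation error is returned.
 */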
2439 return ret ? ret : in_len;
2440 }
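/*
 * Annotation: helper shared by the two receive paths below.  It copies
 * a chain of user work requests into freshly allocated ib_recv_wr
 * entries; on success the caller owns the returned chain and must
 * kfree() each entry, on failure everything is freed here and an
 * ERR_PTR is returned.
 */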
2442 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2443 int in_len,
2444 u32 wr_count,
2445 u32 sge_count,
2446 u32 wqe_size)
2447 {
2448 struct ib_uverbs_recv_wr *user_wr;
2449 struct ib_recv_wr *wr = NULL, *last, *next;
2450 int sg_ind;
2451 int i;
2452 int ret;
2454 if (in_len < wqe_size * wr_count +
2455 sge_count * sizeof (struct ib_uverbs_sge))
2456 return ERR_PTR(-EINVAL);
2458 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2459 return ERR_PTR(-EINVAL);
2461 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2462 if (!user_wr)
2463 return ERR_PTR(-ENOMEM);
2465 sg_ind = 0;
2466 last = NULL;
2467 for (i = 0; i < wr_count; ++i) {
2468 if (copy_from_user(user_wr, buf + i * wqe_size,
2469 wqe_size)) {
2470 ret = -EFAULT;
2471 goto err;
2472 }
2474 if (user_wr->num_sge + sg_ind > sge_count) {
2475 ret = -EINVAL;
2476 goto err;
2477 }
2479 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2480 user_wr->num_sge * sizeof (struct ib_sge),
2481 GFP_KERNEL);
2482 if (!next) {
2483 ret = -ENOMEM;
2484 goto err;
2485 }
2487 if (!last)
2488 wr = next;
2489 else
2490 last->next = next;
2491 last = next;
2493 next->next = NULL;
2494 next->wr_id = user_wr->wr_id;
2495 next->num_sge = user_wr->num_sge;
2497 if (next->num_sge) {
2498 next->sg_list = (void *) next +
2499 ALIGN(sizeof *next, sizeof (struct ib_sge));
2500 if (copy_from_user(next->sg_list,
2501 buf + wr_count * wqe_size +
2502 sg_ind * sizeof (struct ib_sge),
2503 next->num_sge * sizeof (struct ib_sge))) {
2504 ret = -EFAULT;
2505 goto err;
2506 }
2507 sg_ind += next->num_sge;
2508 } else
2509 next->sg_list = NULL;
2510 }
2512 kfree(user_wr);
2513 return wr;
2515 err:
2516 kfree(user_wr);
2518 while (wr) {
2519 next = wr->next;
2520 kfree(wr);
2521 wr = next;
2522 }
2524 return ERR_PTR(ret);
2525 }
2527 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2528 const char __user *buf, int in_len,
2529 int out_len)
2530 {
2531 struct ib_uverbs_post_recv cmd;
2532 struct ib_uverbs_post_recv_resp resp;
2533 struct ib_recv_wr *wr, *next, *bad_wr;
2534 struct ib_qp *qp;
2535 ssize_t ret = -EINVAL;
2537 if (copy_from_user(&cmd, buf, sizeof cmd))
2538 return -EFAULT;
2540 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2541 in_len - sizeof cmd, cmd.wr_count,
2542 cmd.sge_count, cmd.wqe_size);
2543 if (IS_ERR(wr))
2544 return PTR_ERR(wr);
2546 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2547 if (!qp)
2548 goto out;
2550 resp.bad_wr = 0;
2551 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2553 put_qp_read(qp);
2555 if (ret)
2556 for (next = wr; next; next = next->next) {
2557 ++resp.bad_wr;
2558 if (next == bad_wr)
2559 break;
2560 }
2562 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2563 &resp, sizeof resp))
2564 ret = -EFAULT;
2566 out:
2567 while (wr) {
2568 next = wr->next;
2569 kfree(wr);
2570 wr = next;
2571 }
2573 return ret ? ret : in_len;
2574 }
2576 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2577 const char __user *buf, int in_len,
2578 int out_len)
2579 {
2580 struct ib_uverbs_post_srq_recv cmd;
2581 struct ib_uverbs_post_srq_recv_resp resp;
2582 struct ib_recv_wr *wr, *next, *bad_wr;
2583 struct ib_srq *srq;
2584 ssize_t ret = -EINVAL;
2586 if (copy_from_user(&cmd, buf, sizeof cmd))
2587 return -EFAULT;
2589 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2590 in_len - sizeof cmd, cmd.wr_count,
2591 cmd.sge_count, cmd.wqe_size);
2592 if (IS_ERR(wr))
2593 return PTR_ERR(wr);
2595 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2596 if (!srq)
2597 goto out;
2599 resp.bad_wr = 0;
2600 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2602 put_srq_read(srq);
2604 if (ret)
2605 for (next = wr; next; next = next->next) {
2606 ++resp.bad_wr;
2607 if (next == bad_wr)
2608 break;
2609 }
2611 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2612 &resp, sizeof resp))
2613 ret = -EFAULT;
2615 out:
2616 while (wr) {
2617 next = wr->next;
2618 kfree(wr);
2619 wr = next;
2620 }
2622 return ret ? ret : in_len;
2623 }
2625 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2626 const char __user *buf, int in_len,
2627 int out_len)
2628 {
2629 struct ib_uverbs_create_ah cmd;
2630 struct ib_uverbs_create_ah_resp resp;
2631 struct ib_uobject *uobj;
2632 struct ib_pd *pd;
2633 struct ib_ah *ah;
2634 struct ib_ah_attr attr;
2635 int ret;
2637 if (out_len < sizeof resp)
2638 return -ENOSPC;
2640 if (copy_from_user(&cmd, buf, sizeof cmd))
2641 return -EFAULT;
2643 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2644 if (!uobj)
2645 return -ENOMEM;
2647 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2648 down_write(&uobj->mutex);
2650 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2651 if (!pd) {
2652 ret = -EINVAL;
2653 goto err;
2654 }
2656 attr.dlid = cmd.attr.dlid;
2657 attr.sl = cmd.attr.sl;
2658 attr.src_path_bits = cmd.attr.src_path_bits;
2659 attr.static_rate = cmd.attr.static_rate;
2660 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
2661 attr.port_num = cmd.attr.port_num;
2662 attr.grh.flow_label = cmd.attr.grh.flow_label;
2663 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
2664 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
2665 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2666 attr.vlan_id = 0;
2667 memset(&attr.dmac, 0, sizeof(attr.dmac));
2668 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2670 ah = ib_create_ah(pd, &attr);
2671 if (IS_ERR(ah)) {
2672 ret = PTR_ERR(ah);
2673 goto err_put;
2674 }
2676 ah->uobject = uobj;
2677 uobj->object = ah;
2679 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2680 if (ret)
2681 goto err_destroy;
2683 resp.ah_handle = uobj->id;
2685 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2686 &resp, sizeof resp)) {
2687 ret = -EFAULT;
2688 goto err_copy;
2689 }
2691 put_pd_read(pd);
2693 mutex_lock(&file->mutex);
2694 list_add_tail(&uobj->list, &file->ucontext->ah_list);
2695 mutex_unlock(&file->mutex);
2697 uobj->live = 1;
2699 up_write(&uobj->mutex);
2701 return in_len;
2703 err_copy:
2704 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2706 err_destroy:
2707 ib_destroy_ah(ah);
2709 err_put:
2710 put_pd_read(pd);
2712 err:
2713 put_uobj_write(uobj);
2714 return ret;
2715 }
2717 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2718 const char __user *buf, int in_len, int out_len)
2719 {
2720 struct ib_uverbs_destroy_ah cmd;
2721 struct ib_ah *ah;
2722 struct ib_uobject *uobj;
2723 int ret;
2725 if (copy_from_user(&cmd, buf, sizeof cmd))
2726 return -EFAULT;
2728 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2729 if (!uobj)
2730 return -EINVAL;
2731 ah = uobj->object;
2733 ret = ib_destroy_ah(ah);
2734 if (!ret)
2735 uobj->live = 0;
2737 put_uobj_write(uobj);
2739 if (ret)
2740 return ret;
2742 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2744 mutex_lock(&file->mutex);
2745 list_del(&uobj->list);
2746 mutex_unlock(&file->mutex);
2748 put_uobj(uobj);
2750 return in_len;
2751 }
2753 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2754 const char __user *buf, int in_len,
2755 int out_len)
2756 {
2757 struct ib_uverbs_attach_mcast cmd;
2758 struct ib_qp *qp;
2759 struct ib_uqp_object *obj;
2760 struct ib_uverbs_mcast_entry *mcast;
2761 int ret;
2763 if (copy_from_user(&cmd, buf, sizeof cmd))
2764 return -EFAULT;
2766 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2767 if (!qp)
2768 return -EINVAL;
2770 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
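/*
 * Annotation: attaching a (gid, mlid) pair that is already on the
 * per-QP mcast_list is treated as success without calling
 * ib_attach_mcast() again, so each group appears at most once.
 */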
2772 list_for_each_entry(mcast, &obj->mcast_list, list)
2773 if (cmd.mlid == mcast->lid &&
2774 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2775 ret = 0;
2776 goto out_put;
2777 }
2779 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2780 if (!mcast) {
2781 ret = -ENOMEM;
2782 goto out_put;
2783 }
2785 mcast->lid = cmd.mlid;
2786 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2788 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2789 if (!ret)
2790 list_add_tail(&mcast->list, &obj->mcast_list);
2791 else
2792 kfree(mcast);
2794 out_put:
2795 put_qp_write(qp);
2797 return ret ? ret : in_len;
2798 }
2800 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2801 const char __user *buf, int in_len,
2802 int out_len)
2803 {
2804 struct ib_uverbs_detach_mcast cmd;
2805 struct ib_uqp_object *obj;
2806 struct ib_qp *qp;
2807 struct ib_uverbs_mcast_entry *mcast;
2808 int ret = -EINVAL;
2810 if (copy_from_user(&cmd, buf, sizeof cmd))
2811 return -EFAULT;
2813 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2814 if (!qp)
2815 return -EINVAL;
2817 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2818 if (ret)
2819 goto out_put;
2821 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2823 list_for_each_entry(mcast, &obj->mcast_list, list)
2824 if (cmd.mlid == mcast->lid &&
2825 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2826 list_del(&mcast->list);
2827 kfree(mcast);
2828 break;
2829 }
2831 out_put:
2832 put_qp_write(qp);
2834 return ret ? ret : in_len;
2835 }
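/*
 * Annotation: the helper below converts one user-supplied flow spec
 * into the kernel representation.  Each case requires the
 * user-declared size to match the kernel structure exactly, so a
 * truncated or oversized spec is rejected with -EINVAL rather than
 * over-read.
 */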
2837 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2838 union ib_flow_spec *ib_spec)
2839 {
2840 if (kern_spec->reserved)
2841 return -EINVAL;
2843 ib_spec->type = kern_spec->type;
2845 switch (ib_spec->type) {
2846 case IB_FLOW_SPEC_ETH:
2847 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2848 if (ib_spec->eth.size != kern_spec->eth.size)
2849 return -EINVAL;
2850 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2851 sizeof(struct ib_flow_eth_filter));
2852 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2853 sizeof(struct ib_flow_eth_filter));
2854 break;
2855 case IB_FLOW_SPEC_IPV4:
2856 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2857 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2858 return -EINVAL;
2859 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2860 sizeof(struct ib_flow_ipv4_filter));
2861 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2862 sizeof(struct ib_flow_ipv4_filter));
2863 break;
2864 case IB_FLOW_SPEC_TCP:
2865 case IB_FLOW_SPEC_UDP:
2866 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2867 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2868 return -EINVAL;
2869 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2870 sizeof(struct ib_flow_tcp_udp_filter));
2871 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2872 sizeof(struct ib_flow_tcp_udp_filter));
2873 break;
2874 default:
2875 return -EINVAL;
2876 }
2877 return 0;
2878 }
2880 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2881 struct ib_udata *ucore,
2882 struct ib_udata *uhw)
2883 {
2884 struct ib_uverbs_create_flow cmd;
2885 struct ib_uverbs_create_flow_resp resp;
2886 struct ib_uobject *uobj;
2887 struct ib_flow *flow_id;
2888 struct ib_uverbs_flow_attr *kern_flow_attr;
2889 struct ib_flow_attr *flow_attr;
2890 struct ib_qp *qp;
2891 int err = 0;
2892 void *kern_spec;
2893 void *ib_spec;
2894 int i;
2896 if (ucore->inlen < sizeof(cmd))
2897 return -EINVAL;
2899 if (ucore->outlen < sizeof(resp))
2900 return -ENOSPC;
2902 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2903 if (err)
2904 return err;
2906 ucore->inbuf += sizeof(cmd);
2907 ucore->inlen -= sizeof(cmd);
2909 if (cmd.comp_mask)
2910 return -EINVAL;
2912 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2913 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2914 return -EPERM;
2916 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2917 return -EINVAL;
2919 if (cmd.flow_attr.size > ucore->inlen ||
2920 cmd.flow_attr.size >
2921 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2922 return -EINVAL;
2924 if (cmd.flow_attr.reserved[0] ||
2925 cmd.flow_attr.reserved[1])
2926 return -EINVAL;
2928 if (cmd.flow_attr.num_of_specs) {
2929 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2930 GFP_KERNEL);
2931 if (!kern_flow_attr)
2932 return -ENOMEM;
2934 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2935 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2936 cmd.flow_attr.size);
2937 if (err)
2938 goto err_free_attr;
2939 } else {
2940 kern_flow_attr = &cmd.flow_attr;
2941 }
2943 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2944 if (!uobj) {
2945 err = -ENOMEM;
2946 goto err_free_attr;
2947 }
2948 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2949 down_write(&uobj->mutex);
2951 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2952 if (!qp) {
2953 err = -EINVAL;
2954 goto err_uobj;
2955 }
2957 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2958 if (!flow_attr) {
2959 err = -ENOMEM;
2960 goto err_put;
2961 }
2963 flow_attr->type = kern_flow_attr->type;
2964 flow_attr->priority = kern_flow_attr->priority;
2965 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2966 flow_attr->port = kern_flow_attr->port;
2967 flow_attr->flags = kern_flow_attr->flags;
2968 flow_attr->size = sizeof(*flow_attr);
2970 kern_spec = kern_flow_attr + 1;
2971 ib_spec = flow_attr + 1;
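/*
 * Annotation: the loop below walks the variable-length spec array.
 * cmd.flow_attr.size tracks the user bytes not yet consumed, and each
 * iteration advances the user-side (kern_spec) and kernel-side
 * (ib_spec) cursors by the size of the spec just converted.  Leftover
 * bytes, or fewer valid specs than advertised, fail the check after
 * the loop.
 */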
2972 for (i = 0; i < flow_attr->num_of_specs &&
2973 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2974 cmd.flow_attr.size >=
2975 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2976 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2977 if (err)
2978 goto err_free;
2979 flow_attr->size +=
2980 ((union ib_flow_spec *) ib_spec)->size;
2981 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2982 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2983 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2984 }
2985 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2986 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2987 i, cmd.flow_attr.size);
2988 err = -EINVAL;
2989 goto err_free;
2990 }
2991 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2992 if (IS_ERR(flow_id)) {
2993 err = PTR_ERR(flow_id);
2994 goto err_free;
2995 }
2996 flow_id->qp = qp;
2997 flow_id->uobject = uobj;
2998 uobj->object = flow_id;
3000 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
3001 if (err)
3002 goto destroy_flow;
3004 memset(&resp, 0, sizeof(resp));
3005 resp.flow_handle = uobj->id;
3007 err = ib_copy_to_udata(ucore,
3008 &resp, sizeof(resp));
3009 if (err)
3010 goto err_copy;
3012 put_qp_read(qp);
3013 mutex_lock(&file->mutex);
3014 list_add_tail(&uobj->list, &file->ucontext->rule_list);
3015 mutex_unlock(&file->mutex);
3017 uobj->live = 1;
3019 up_write(&uobj->mutex);
3020 kfree(flow_attr);
3021 if (cmd.flow_attr.num_of_specs)
3022 kfree(kern_flow_attr);
3023 return 0;
3024 err_copy:
3025 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3026 destroy_flow:
3027 ib_destroy_flow(flow_id);
3028 err_free:
3029 kfree(flow_attr);
3030 err_put:
3031 put_qp_read(qp);
3032 err_uobj:
3033 put_uobj_write(uobj);
3034 err_free_attr:
3035 if (cmd.flow_attr.num_of_specs)
3036 kfree(kern_flow_attr);
3037 return err;
3038 }
3040 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3041 struct ib_udata *ucore,
3042 struct ib_udata *uhw)
3043 {
3044 struct ib_uverbs_destroy_flow cmd;
3045 struct ib_flow *flow_id;
3046 struct ib_uobject *uobj;
3047 int ret;
3049 if (ucore->inlen < sizeof(cmd))
3050 return -EINVAL;
3052 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3053 if (ret)
3054 return ret;
3056 if (cmd.comp_mask)
3057 return -EINVAL;
3059 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
3060 file->ucontext);
3061 if (!uobj)
3062 return -EINVAL;
3063 flow_id = uobj->object;
3065 ret = ib_destroy_flow(flow_id);
3066 if (!ret)
3067 uobj->live = 0;
3069 put_uobj_write(uobj);
3071 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3073 mutex_lock(&file->mutex);
3074 list_del(&uobj->list);
3075 mutex_unlock(&file->mutex);
3077 put_uobj(uobj);
3079 return ret;
3080 }
3082 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3083 struct ib_uverbs_create_xsrq *cmd,
3084 struct ib_udata *udata)
3085 {
3086 struct ib_uverbs_create_srq_resp resp;
3087 struct ib_usrq_object *obj;
3088 struct ib_pd *pd;
3089 struct ib_srq *srq;
3090 struct ib_uobject *uninitialized_var(xrcd_uobj);
3091 struct ib_srq_init_attr attr;
3092 int ret;
3094 obj = kmalloc(sizeof *obj, GFP_KERNEL);
3095 if (!obj)
3096 return -ENOMEM;
3098 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
3099 down_write(&obj->uevent.uobject.mutex);
3101 if (cmd->srq_type == IB_SRQT_XRC) {
3102 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
3103 if (!attr.ext.xrc.xrcd) {
3104 ret = -EINVAL;
3105 goto err;
3106 }
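/*
 * Annotation: for XRC SRQs a reference is taken on the XRCD's user
 * object so it cannot be torn down while this SRQ uses it; the
 * reference is dropped again in ib_uverbs_destroy_srq() and on the
 * error paths below.
 */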
3108 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3109 atomic_inc(&obj->uxrcd->refcnt);
3111 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3112 if (!attr.ext.xrc.cq) {
3113 ret = -EINVAL;
3114 goto err_put_xrcd;
3115 }
3116 }
3118 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
3119 if (!pd) {
3120 ret = -EINVAL;
3121 goto err_put_cq;
3122 }
3124 attr.event_handler = ib_uverbs_srq_event_handler;
3125 attr.srq_context = file;
3126 attr.srq_type = cmd->srq_type;
3127 attr.attr.max_wr = cmd->max_wr;
3128 attr.attr.max_sge = cmd->max_sge;
3129 attr.attr.srq_limit = cmd->srq_limit;
3131 obj->uevent.events_reported = 0;
3132 INIT_LIST_HEAD(&obj->uevent.event_list);
3134 srq = pd->device->create_srq(pd, &attr, udata);
3135 if (IS_ERR(srq)) {
3136 ret = PTR_ERR(srq);
3137 goto err_put;
3138 }
3140 srq->device = pd->device;
3141 srq->pd = pd;
3142 srq->srq_type = cmd->srq_type;
3143 srq->uobject = &obj->uevent.uobject;
3144 srq->event_handler = attr.event_handler;
3145 srq->srq_context = attr.srq_context;
3147 if (cmd->srq_type == IB_SRQT_XRC) {
3148 srq->ext.xrc.cq = attr.ext.xrc.cq;
3149 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3150 atomic_inc(&attr.ext.xrc.cq->usecnt);
3151 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3152 }
3154 atomic_inc(&pd->usecnt);
3155 atomic_set(&srq->usecnt, 0);
3157 obj->uevent.uobject.object = srq;
3158 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3159 if (ret)
3160 goto err_destroy;
3162 memset(&resp, 0, sizeof resp);
3163 resp.srq_handle = obj->uevent.uobject.id;
3164 resp.max_wr = attr.attr.max_wr;
3165 resp.max_sge = attr.attr.max_sge;
3166 if (cmd->srq_type == IB_SRQT_XRC)
3167 resp.srqn = srq->ext.xrc.srq_num;
3169 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3170 &resp, sizeof resp)) {
3171 ret = -EFAULT;
3172 goto err_copy;
3173 }
3175 if (cmd->srq_type == IB_SRQT_XRC) {
3176 put_uobj_read(xrcd_uobj);
3177 put_cq_read(attr.ext.xrc.cq);
3178 }
3179 put_pd_read(pd);
3181 mutex_lock(&file->mutex);
3182 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3183 mutex_unlock(&file->mutex);
3185 obj->uevent.uobject.live = 1;
3187 up_write(&obj->uevent.uobject.mutex);
3189 return 0;
3191 err_copy:
3192 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3194 err_destroy:
3195 ib_destroy_srq(srq);
3197 err_put:
3198 put_pd_read(pd);
3200 err_put_cq:
3201 if (cmd->srq_type == IB_SRQT_XRC)
3202 put_cq_read(attr.ext.xrc.cq);
3204 err_put_xrcd:
3205 if (cmd->srq_type == IB_SRQT_XRC) {
3206 atomic_dec(&obj->uxrcd->refcnt);
3207 put_uobj_read(xrcd_uobj);
3208 }
3210 err:
3211 put_uobj_write(&obj->uevent.uobject);
3212 return ret;
3213 }
3215 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3216 const char __user *buf, int in_len,
3217 int out_len)
3218 {
3219 struct ib_uverbs_create_srq cmd;
3220 struct ib_uverbs_create_xsrq xcmd;
3221 struct ib_uverbs_create_srq_resp resp;
3222 struct ib_udata udata;
3223 int ret;
3225 if (out_len < sizeof resp)
3226 return -ENOSPC;
3228 if (copy_from_user(&cmd, buf, sizeof cmd))
3229 return -EFAULT;
3231 xcmd.response = cmd.response;
3232 xcmd.user_handle = cmd.user_handle;
3233 xcmd.srq_type = IB_SRQT_BASIC;
3234 xcmd.pd_handle = cmd.pd_handle;
3235 xcmd.max_wr = cmd.max_wr;
3236 xcmd.max_sge = cmd.max_sge;
3237 xcmd.srq_limit = cmd.srq_limit;
3239 INIT_UDATA(&udata, buf + sizeof cmd,
3240 (unsigned long) cmd.response + sizeof resp,
3241 in_len - sizeof cmd, out_len - sizeof resp);
3243 ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3244 if (ret)
3245 return ret;
3247 return in_len;
3248 }
3250 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3251 const char __user *buf, int in_len, int out_len)
3252 {
3253 struct ib_uverbs_create_xsrq cmd;
3254 struct ib_uverbs_create_srq_resp resp;
3255 struct ib_udata udata;
3256 int ret;
3258 if (out_len < sizeof resp)
3259 return -ENOSPC;
3261 if (copy_from_user(&cmd, buf, sizeof cmd))
3262 return -EFAULT;
3264 INIT_UDATA(&udata, buf + sizeof cmd,
3265 (unsigned long) cmd.response + sizeof resp,
3266 in_len - sizeof cmd, out_len - sizeof resp);
3268 ret = __uverbs_create_xsrq(file, &cmd, &udata);
3269 if (ret)
3270 return ret;
3272 return in_len;
3273 }
3275 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3276 const char __user *buf, int in_len,
3277 int out_len)
3278 {
3279 struct ib_uverbs_modify_srq cmd;
3280 struct ib_udata udata;
3281 struct ib_srq *srq;
3282 struct ib_srq_attr attr;
3283 int ret;
3285 if (copy_from_user(&cmd, buf, sizeof cmd))
3286 return -EFAULT;
3288 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3289 out_len);
3291 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3292 if (!srq)
3293 return -EINVAL;
3295 attr.max_wr = cmd.max_wr;
3296 attr.srq_limit = cmd.srq_limit;
3298 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3300 put_srq_read(srq);
3302 return ret ? ret : in_len;
3303 }
3305 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3306 const char __user *buf,
3307 int in_len, int out_len)
3308 {
3309 struct ib_uverbs_query_srq cmd;
3310 struct ib_uverbs_query_srq_resp resp;
3311 struct ib_srq_attr attr;
3312 struct ib_srq *srq;
3313 int ret;
3315 if (out_len < sizeof resp)
3316 return -ENOSPC;
3318 if (copy_from_user(&cmd, buf, sizeof cmd))
3319 return -EFAULT;
3321 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3322 if (!srq)
3323 return -EINVAL;
3325 ret = ib_query_srq(srq, &attr);
3327 put_srq_read(srq);
3329 if (ret)
3330 return ret;
3332 memset(&resp, 0, sizeof resp);
3334 resp.max_wr = attr.max_wr;
3335 resp.max_sge = attr.max_sge;
3336 resp.srq_limit = attr.srq_limit;
3338 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3339 &resp, sizeof resp))
3340 return -EFAULT;
3342 return in_len;
3343 }
3345 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3346 const char __user *buf, int in_len,
3347 int out_len)
3348 {
3349 struct ib_uverbs_destroy_srq cmd;
3350 struct ib_uverbs_destroy_srq_resp resp;
3351 struct ib_uobject *uobj;
3352 struct ib_srq *srq;
3353 struct ib_uevent_object *obj;
3354 int ret = -EINVAL;
3355 struct ib_usrq_object *us;
3356 enum ib_srq_type srq_type;
3358 if (copy_from_user(&cmd, buf, sizeof cmd))
3359 return -EFAULT;
3361 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3362 if (!uobj)
3363 return -EINVAL;
3364 srq = uobj->object;
3365 obj = container_of(uobj, struct ib_uevent_object, uobject);
3366 srq_type = srq->srq_type;
3368 ret = ib_destroy_srq(srq);
3369 if (!ret)
3370 uobj->live = 0;
3372 put_uobj_write(uobj);
3374 if (ret)
3375 return ret;
3377 if (srq_type == IB_SRQT_XRC) {
3378 us = container_of(obj, struct ib_usrq_object, uevent);
3379 atomic_dec(&us->uxrcd->refcnt);
3380 }
3382 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3384 mutex_lock(&file->mutex);
3385 list_del(&uobj->list);
3386 mutex_unlock(&file->mutex);
3388 ib_uverbs_release_uevent(file, obj);
3390 memset(&resp, 0, sizeof resp);
3391 resp.events_reported = obj->events_reported;
3393 put_uobj(uobj);
3395 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3396 &resp, sizeof resp))
3397 ret = -EFAULT;
3399 return ret ? ret : in_len;
3400 }
3402 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3403 struct ib_udata *ucore,
3404 struct ib_udata *uhw)
3405 {
3406 struct ib_uverbs_ex_query_device_resp resp;
3407 struct ib_uverbs_ex_query_device cmd;
3408 struct ib_device_attr attr;
3409 struct ib_device *device;
3410 int err;
3412 device = file->device->ib_dev;
3413 if (ucore->inlen < sizeof(cmd))
3414 return -EINVAL;
3416 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3417 if (err)
3418 return err;
3420 if (cmd.comp_mask)
3421 return -EINVAL;
3423 if (cmd.reserved)
3424 return -EINVAL;
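/*
 * Annotation: the extended response is versioned by length.
 * response_length starts at the legacy portion of the struct and grows
 * by one trailing field at a time, but only while the user's output
 * buffer is large enough to receive that field.
 */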
3426 resp.response_length = offsetof(typeof(resp), odp_caps);
3428 if (ucore->outlen < resp.response_length)
3429 return -ENOSPC;
3431 memset(&attr, 0, sizeof(attr));
3433 err = device->query_device(device, &attr, uhw);
3434 if (err)
3435 return err;
3437 copy_query_dev_fields(file, &resp.base, &attr);
3438 resp.comp_mask = 0;
3440 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3441 goto end;
3443 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3444 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3445 resp.odp_caps.per_transport_caps.rc_odp_caps =
3446 attr.odp_caps.per_transport_caps.rc_odp_caps;
3447 resp.odp_caps.per_transport_caps.uc_odp_caps =
3448 attr.odp_caps.per_transport_caps.uc_odp_caps;
3449 resp.odp_caps.per_transport_caps.ud_odp_caps =
3450 attr.odp_caps.per_transport_caps.ud_odp_caps;
3451 resp.odp_caps.reserved = 0;
3452 #else
3453 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3454 #endif
3455 resp.response_length += sizeof(resp.odp_caps);
3457 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3458 goto end;
3460 resp.timestamp_mask = attr.timestamp_mask;
3461 resp.response_length += sizeof(resp.timestamp_mask);
3463 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3464 goto end;
3466 resp.hca_core_clock = attr.hca_core_clock;
3467 resp.response_length += sizeof(resp.hca_core_clock);
3469 end:
3470 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3471 if (err)
3472 return err;
3474 return 0;
3475 }