/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region? let transport return failure?
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

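/*
 * Worked example (illustrative only, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * for vec->addr = 0x1003 and vec->bytes = 0x2001 the buffer touches pages
 * 0x1, 0x2 and 0x3, and the arithmetic above gives
 *	((0x1003 + 0x2001 + 0xfff) >> 12) - (0x1003 >> 12) = 4 - 1 = 3.
 */
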
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

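/*
 * Usage sketch (illustrative): with insert == NULL this is a pure lookup
 * that returns the matching MR or NULL; with insert != NULL a miss links
 * the new MR into the tree and takes a reference. Callers in this file
 * always hold rs_rdma_lock around the walk, e.g.
 *
 *	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 *	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 *	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 */
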
/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;

	/* Release any MRs associated with this socket */
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rds_mr_put(mr);
	}

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

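/*
 * Pinning is all-or-nothing (illustrative): on a short pin the partial
 * page references are dropped and -EFAULT is returned, so callers can do
 *
 *	ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, 1);
 *	if (ret < 0)
 *		goto out;	// nothing left to unpin
 *	nents = ret;		// == nr on success
 */
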
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (rs->rs_transport->get_mr == NULL) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (mr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

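	/*
	 * Illustrative example: with 4 KiB pages, an unaligned
	 * args->vec.addr of 0x1003 keeps offset 0x1003 & ~PAGE_MASK == 0x3
	 * in the cookie next to mr->r_key; rds_rdma_cookie_key() and
	 * rds_rdma_cookie_offset() (in rdma.h) recover the two halves on
	 * the path that consumes the cookie.
	 */
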
	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}

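/*
 * Illustrative note: this handler is reached from the RDS setsockopt
 * path, roughly as
 *
 *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * where args is a struct rds_get_mr_args; the dispatch itself lives
 * outside this file (af_rds.c).
 */
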
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return. If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

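/*
 * Illustrative note: a flush-all request from userspace would look
 * roughly like
 *
 *	struct rds_free_mr_args args = { .cookie = 0 };
 *	setsockopt(fd, SOL_RDS, RDS_FREE_MR, &args, sizeof(args));
 *
 * hitting the null-cookie special case above (dispatch in af_rds.c).
 */
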
/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr && (mr->r_use_once || force)) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr != NULL) {
		if (mr->r_trans->sync_mr)
			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

		/* If the MR was marked as invalidate, this will
		 * trigger an async flush. */
		if (zot_me)
			rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rds_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->r_nents; i++) {
		struct page *page = sg_page(&ro->r_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->r_write) {
			BUG_ON(in_interrupt());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->r_notifier);
	kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
					    struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_rdma_op *op = NULL;
	unsigned int nr_pages;
	unsigned int max_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec __user *local_vec;
	struct scatterlist *sg;
	unsigned int nr;
	unsigned int i, j;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (args->nr_local > (u64)UINT_MAX) {
		ret = -EMSGSIZE;
		goto out;
	}

	nr_pages = 0;
	max_pages = 0;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		max_pages = max(nr, max_pages);
		nr_pages += nr;
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	sg_init_table(op->r_sg, nr_pages);

	if (op->r_notify || op->r_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 *
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 *
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->r_key = rds_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->r_key);

	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

		/* did the user change the vec under us? */
		if (nr > max_pages || op->r_nents + nr > nr_pages) {
			ret = -EINVAL;
			goto out;
		}

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
		       nr_bytes, nr, vec.bytes, vec.addr);

		nr_bytes += vec.bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = vec.addr & ~PAGE_MASK;

			sg = &op->r_sg[op->r_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
			       sg->offset, sg->length, vec.addr, vec.bytes);

			vec.addr += sg->length;
			vec.bytes -= sg->length;
		}

		op->r_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}

	op->r_bytes = nr_bytes;

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		if (op)
			rds_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return op;
}

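/*
 * Worked example of the scatterlist split in rds_rdma_prepare()
 * (illustrative only, 4 KiB pages): a local vec with addr = 0x1ffc and
 * bytes = 8 spans two pages, so rds_pages_in_vec() returns 2 and the
 * inner loop above emits
 *	sg[0]: page at 0x1000, offset = 0xffc, length = 4
 *	sg[1]: page at 0x2000, offset = 0x0,   length = 4
 */
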
/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	struct rds_rdma_op *op;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
	    rm->m_rdma_op != NULL)
		return -EINVAL;

	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
	if (IS_ERR(op))
		return PTR_ERR(op);
	rds_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr == NULL)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}

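/*
 * Userspace sketch (illustrative, not part of this file): the three
 * rds_cmsg_* handlers above are reached from the RDS sendmsg() path
 * when an application attaches control messages, roughly as
 *
 *	struct rds_rdma_args rargs = { ... };
 *	char cbuf[CMSG_SPACE(sizeof(rargs))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type = RDS_CMSG_RDMA_ARGS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(rargs));
 *	memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));
 *	sendmsg(fd, &msg, 0);
 *
 * RDS_CMSG_RDMA_DEST and RDS_CMSG_RDMA_MAP are used the same way, with
 * a cookie or a struct rds_get_mr_args payload, respectively.
 */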