/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
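 *
 * For example, with 4K pages, addr = 0x1ff8 and bytes = 16 spans the last
 * 8 bytes of one page and the first 8 bytes of the next:
 * ((0x1ff8 + 16 + 4095) >> 12) - (0x1ff8 >> 12) = 3 - 1 = 2 pages.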
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
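
/*
 * Look up the MR with the given R_Key in the rbtree.  If 'insert' is
 * non-NULL and the key is not already present, link it into the tree and
 * take a reference on it.  Returns the MR that already owns the key, or
 * NULL if there was no collision.
 */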
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
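
/*
 * Final teardown of an MR, called once the last reference has been dropped
 * (see rds_mr_put() in rdma.h).
 */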
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;

	/* Release any MRs associated with this socket */
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rds_mr_put(mr);
	}

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
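 *
 * The pin is all-or-nothing: if get_user_pages_fast() returns fewer pages
 * than requested, the partial pin is released again and -EFAULT is returned.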
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}
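
/*
 * Pin the user buffer described by args->vec, hand it to the transport's
 * get_mr() to obtain an R_Key, build the <R_Key, offset> cookie and insert
 * the new MR into the socket's rbtree.
 */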
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN;
		goto out;
	}

	if (rs->rs_transport->get_mr == NULL) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (mr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w.  We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions.  So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
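	/* A sketch of the packing, assuming the cookie helpers in rdma.h
	 * encode the pair as r_key | (u64)offset << 32: an R_Key of 0x1234
	 * for a buffer starting 0x10 bytes into its first page yields the
	 * cookie 0x0000001000001234. */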
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}
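
/*
 * Map a user region described by a struct rds_get_mr_args copied from user
 * space (the RDS_GET_MR socket option path).
 */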
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}
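
/*
 * Variant of rds_get_mr() whose argument structure also carries the intended
 * destination; for now the destination is ignored and the mapping behaves
 * exactly like rds_get_mr() (see the TODO below).
 */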
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used.  It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr && (mr->r_use_once || force)) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr != NULL) {
		if (mr->r_trans->sync_mr)
			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

		/* If the MR was marked as invalidate, this will
		 * trigger an async flush. */
		if (zot_me)
			rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}
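
/*
 * Tear down an rdma op: drop the references on the pinned pages (marking
 * them dirty if the remote side may have written to them), then free the
 * notifier and the op itself.
 */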
void rds_rdma_free_op(struct rds_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->r_nents; i++) {
		struct page *page = sg_page(&ro->r_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->r_write) {
			BUG_ON(in_interrupt());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->r_notifier);
	kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
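 *
 * The user's local iovec is walked twice: a first pass to size the sg
 * array, then a second pass that re-copies and re-validates each vector
 * before pinning its pages, since user space may change the iovec between
 * the two copies.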
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
					    struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_rdma_op *op = NULL;
	unsigned int nr_pages;
	unsigned int max_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec __user *local_vec;
	struct scatterlist *sg;
	unsigned int nr;
	unsigned int i, j;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN;
		goto out;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out;
	}

	nr_pages = 0;
	max_pages = 0;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		max_pages = max(nr, max_pages);
		nr_pages += nr;
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	sg_init_table(op->r_sg, nr_pages);

	if (op->r_notify || op->r_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler.  We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}
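
	/* The cookie carries the R_Key for the remote memory region and, for
	 * unaligned mappings, the offset into its first page as built by
	 * __rds_rdma_map() on the peer. */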
	op->r_key = rds_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->r_key);

	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

		/* did the user change the vec under us? */
		if (nr > max_pages || op->r_nents + nr > nr_pages) {
			ret = -EINVAL;
			goto out;
		}

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
		       nr_bytes, nr, vec.bytes, vec.addr);

		nr_bytes += vec.bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = vec.addr & ~PAGE_MASK;

			sg = &op->r_sg[op->r_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
			       sg->offset, sg->length, vec.addr, vec.bytes);

			vec.addr += sg->length;
			vec.bytes -= sg->length;
		}

		op->r_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->r_bytes = nr_bytes;

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		if (op)
			rds_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return op;
}

/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	struct rds_rdma_op *op;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
	    rm->m_rdma_op != NULL)
		return -EINVAL;

	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
	if (IS_ERR(op))
		return PTR_ERR(op);
	rds_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM.  Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr == NULL)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from.  We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie.  This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}