/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
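
/*
 * Worked example for rds_pages_in_vec() (illustrative only, assuming
 * 4 KiB pages): a vec with addr = 0x1003 and bytes = 0x2000 has its last
 * byte at 0x3002, touching the pages at indices 1, 2 and 3, and the
 * expression above yields
 * ((0x1003 + 0x2000 + 0xfff) >> 12) - (0x1003 >> 12) = 4 - 1 = 3.
 */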

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;

	/* Release any MRs associated with this socket */
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rds_mr_put(mr);
	}

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (rs->rs_transport->get_mr == NULL) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (mr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;
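
	/*
	 * Illustrative example (made-up values): for an MR with r_key 0x1234
	 * covering a buffer that starts 0x10 bytes into its first page, the
	 * cookie carries <0x1234, 0x10>; rds_rdma_cookie_key() and
	 * rds_rdma_cookie_offset() recover the two halves when the cookie
	 * comes back from the application or the peer.
	 */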

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return. If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr && (mr->r_use_once || force)) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr != NULL) {
		if (mr->r_trans->sync_mr)
			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

		/* If the MR was marked as invalidate, this will
		 * trigger an async flush. */
		if (zot_me)
			rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rds_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->r_nents; i++) {
		struct page *page = sg_page(&ro->r_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->r_write) {
			BUG_ON(in_interrupt());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->r_notifier);
	kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
					    struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_rdma_op *op = NULL;
	unsigned int nr_pages;
	unsigned int max_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec __user *local_vec;
	struct scatterlist *sg;
	unsigned int nr;
	unsigned int i, j;
	int ret = 0;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out;
	}

	nr_pages = 0;
	max_pages = 0;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		max_pages = max(nr, max_pages);
		nr_pages += nr;
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	sg_init_table(op->r_sg, nr_pages);

	if (op->r_notify || op->r_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	op->r_key = rds_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->r_key);

	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

		/* did the user change the vec under us? */
		if (nr > max_pages || op->r_nents + nr > nr_pages) {
			ret = -EINVAL;
			goto out;
		}
		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
		       nr_bytes, nr, vec.bytes, vec.addr);

		nr_bytes += vec.bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = vec.addr & ~PAGE_MASK;

			sg = &op->r_sg[op->r_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
			       sg->offset, sg->length, vec.addr, vec.bytes);

			vec.addr += sg->length;
			vec.bytes -= sg->length;
		}

		op->r_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}

	op->r_bytes = nr_bytes;

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		if (op)
			rds_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return op;
}

/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	struct rds_rdma_op *op;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
	    rm->m_rdma_op != NULL)
		return -EINVAL;

	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
	if (IS_ERR(op))
		return PTR_ERR(op);
	rds_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr == NULL)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}
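
/*
 * Hypothetical userspace sketch (not part of this file; see rds-rdma(7)
 * for the real interface) of how the RDS_CMSG_RDMA_MAP path above is
 * driven: the application attaches a struct rds_get_mr_args as ancillary
 * data on a sendmsg() call, and the <R_Key,offset> cookie built by
 * __rds_rdma_map() travels to the peer in an extension header.
 *
 *	struct rds_get_mr_args mr_args = {
 *		.vec		= { .addr = (uint64_t) buf, .bytes = len },
 *		.cookie_addr	= (uint64_t) &cookie,
 *		.flags		= RDS_RDMA_USE_ONCE,
 *	};
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type	 = RDS_CMSG_RDMA_MAP;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(mr_args));
 *	memcpy(CMSG_DATA(cmsg), &mr_args, sizeof(mr_args));
 *	sendmsg(fd, &msg, 0);
 */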