/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "iw.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;
	struct ib_fast_reg_page_list *page_list;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;		/* back ptr to the device that owns us */

	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	dirty_list;		/* dirty mappings */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_message_size;	/* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
			struct list_head *unmap_list,
			struct list_head *kill_list);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);

static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d"
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				src_addr->sin_addr.s_addr,
				src_addr->sin_port,
				dst_addr->sin_addr.s_addr,
				dst_addr->sin_port,
				rs->rs_bound_addr,
				rs->rs_bound_port,
				rs->rs_conn_addr,
				rs->rs_conn_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
			    src_addr->sin_port == rs->rs_bound_port &&
			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
			    dst_addr->sin_port == rs->rs_conn_port) {
#else
			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}
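
/*
 * Editorial note: with WORKING_TUPLE_DETECTION undefined, the lookup above
 * matches on the bound local address alone. Two sockets bound to the same
 * local address but connected to different peers can therefore resolve to
 * the same cm_id; the full four-tuple compare in the #ifdef branch is what
 * would disambiguate them.
 */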

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}

void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}

int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rds_sock rs;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
	rs.rs_bound_port = src_addr->sin_port;
	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
	rs.rs_conn_port = dst_addr->sin_port;

	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	/* Plain spin_lock here: IRQs are already disabled, and the matching
	 * unlock must not re-enable them while iw_nodev_conns_lock is held. */
	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
		struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}

static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
			struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	u64 *dma_pages = NULL;
	int i, j, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return ERR_PTR(-EBUSY);
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	/* Count the pages covered by the mapping. Only the first fragment
	 * may start, and only the last fragment may end, off a page
	 * boundary; anything else cannot be described by a single fastreg
	 * page list. */
	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= PAGE_MASK;	/* round down to the page start */
		}
		if (end_addr & ~PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + ~PAGE_MASK) & PAGE_MASK;	/* round up */
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	for (i = j = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		end_addr = dma_addr + dma_len;
		dma_addr &= PAGE_MASK;
		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
			dma_pages[j++] = dma_addr;
		BUG_ON(j > sg->dma_npages);
	}

	return dma_pages;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	kfree(dma_pages);
	return ERR_PTR(ret);
}
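
/*
 * Worked example (editorial, 4K pages): DMA fragments
 * [0x1080, len 0xf80], [0x2000, len 0x1000] and [0x3000, len 0x800] are
 * accepted, because only the first starts and only the last ends off a
 * page boundary. The page-aligned span is 0x1000..0x4000, so dma_npages
 * is 3 and dma_pages[] ends up as { 0x1000, 0x2000, 0x3000 }.
 */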

struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}
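
/*
 * Example with hypothetical numbers: for fastreg_pool_size = 2048, the
 * pool caps out at 2048 MRs, starts freeing aggressively above
 * max_items_soft = 2048 * 3 / 4 = 1536, and allows at most
 * 2048 * max_message_size / 4 pinned pages on free MRs. This satisfies
 * the invariant max_items > max_items_soft > max_items / 2 stated above.
 */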

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flush any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it. */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_fastreg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
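
/*
 * Illustrative allocation trace (editorial): with an empty clean list and
 * item_count already at max_items, the first two passes through the loop
 * above flush the pool synchronously in the hope that dirty MRs are
 * laundered back onto the clean list; on the third failed pass the caller
 * gets -EAGAIN instead of stalling the send path indefinitely.
 */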

void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_iw_flush_goal(pool, free_all);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains are laundered ibmrs, which we can add
	 * back to the clean list. */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
	return ret;
}
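
/*
 * Flush phases in brief (editorial summary): (1) splice the dirty list
 * (plus, for free_all, the clean list) out from under list_lock, (2)
 * batch-invalidate the unmap list without holding the lock, (3) destroy
 * everything that migrated to kill_list, (4) splice the laundered
 * remainder back onto clean_list and fix up the counters.
 */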

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}

void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}

void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	int ret;

	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be setup prior to the GET_MR call for
 * RDMA to be correctly setup. If a fastreg request is present, rds_iw_xmit
 * will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR work request
 * before queuing the SEND. When completions for these arrive, they are
 * dispatched and the MR has a bit set showing that RDMA can be performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
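
/*
 * Sketch of the resulting sequence (editorial, following the description
 * above):
 *
 *   GET_MR:  rds_iw_get_mr() maps the pages and posts a FAST_REG_MR WR
 *   xmit:    LOCAL_INV (if needed), FAST_REG_MR, then the SEND carrying r_key
 *   peer:    issues RDMA against r_key once the registration completes
 *   FREE_MR: a LOCAL_INV is posted and the MR is parked on the dirty list
 */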

static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_fast_reg_page_list *page_list = NULL;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
		return err;
	}

	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
	if (IS_ERR(page_list)) {
		err = PTR_ERR(page_list);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
		ib_dereg_mr(mr);
		return err;
	}

	ibmr->page_list = page_list;
	ibmr->mr = mr;
	return 0;
}

static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct ib_send_wr f_wr, *failed_wr;
	int ret;

	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	memset(&f_wr, 0, sizeof(f_wr));
	f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
	f_wr.opcode = IB_WR_FAST_REG_MR;
	f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
	f_wr.wr.fast_reg.page_list = ibmr->page_list;
	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
	f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;
	f_wr.wr.fast_reg.iova_start = 0;
	f_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &f_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
	BUG_ON(failed_wr != &f_wr);
	if (ret && printk_ratelimit())
		printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
	return ret;
}
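
/*
 * Editorial note: ib_update_fast_reg_key() replaces only the low 8 bits of
 * the key, i.e. the new rkey is (old rkey & 0xffffff00) | remap_count.
 * A stale r_key from an earlier mapping of this MR stops matching after
 * the update, which is what makes the rolling 8-bit counter workable.
 */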

static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret && printk_ratelimit()) {
		printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}

static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr,
			struct scatterlist *sg,
			unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	u64 *dma_pages;
	int i, ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (IS_ERR(dma_pages)) {
		ret = PTR_ERR(dma_pages);
		dma_pages = NULL;
		goto out;
	}

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
		ibmr->page_list->page_list[i] = dma_pages[i];

	ret = rds_iw_rdma_build_fastreg(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	kfree(dma_pages);
	return ret;
}

794 * "Free" a fastreg MR.
796 static void rds_iw_free_fastreg(struct rds_iw_mr_pool
*pool
,
797 struct rds_iw_mr
*ibmr
)
802 if (!ibmr
->mapping
.m_sg
.dma_len
)
805 ret
= rds_iw_rdma_fastreg_inv(ibmr
);
809 /* Try to post the LOCAL_INV WR to the queue. */
810 spin_lock_irqsave(&pool
->list_lock
, flags
);
812 list_add_tail(&ibmr
->mapping
.m_list
, &pool
->dirty_list
);
813 atomic_add(ibmr
->mapping
.m_sg
.len
, &pool
->free_pinned
);
814 atomic_inc(&pool
->dirty_count
);
816 spin_unlock_irqrestore(&pool
->list_lock
, flags
);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (eg you could add a small
	 * cache of unused MRs to struct rds_socket - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}
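
/*
 * Editorial example of the corruption risk described above: if MR A is
 * freed while the peer still knows its r_key, and A is remapped for
 * buffer B without an intervening invalidate, a late RDMA WRITE with the
 * old r_key lands in B. Forcing every freed mapping through this unmap
 * pass before reuse closes that window.
 */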

static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	if (ibmr->page_list)
		ib_free_fast_reg_page_list(ibmr->page_list);
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}