net/rds/ib_rdma.c (Linux 4.19-rc7)

/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;
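
/* Each CPU that is pulling MRs off a pool's clean_list sets the
 * CLEAN_LIST_BUSY_BIT in its clean_list_grace word for the duration of
 * the llist_del_first() call.  Code that wants to push entries back onto
 * a clean_list first spins in wait_clean_list_grace() until every CPU has
 * cleared its bit, so a node is never re-added while another CPU is still
 * dereferencing it inside llist_del_first().
 */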
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
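
/* Find the rds_ib_device that has @ipaddr (an IPv4 address in network byte
 * order) on its ipaddr_list.  The lists are walked under RCU; on success a
 * reference on the device is taken, which the caller must drop with
 * rds_ib_dev_put().
 */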
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                refcount_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free)
                kfree_rcu(to_free, rcu);
}
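
/* Associate @ipaddr with @rds_ibdev.  Only the IPv4 part of the address
 * (s6_addr32[3]) is tracked here.  If the address was previously bound to a
 * different device it is moved over; rebinding to the same device is a
 * no-op.
 */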
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
                         struct in6_addr *ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo->rdma_mr_max = pool_1m->max_items;
        iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                         struct rds6_info_rdma_connection *iinfo6)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo6->rdma_mr_max = pool_1m->max_items;
        iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
#endif
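
/* Grab an already-cleaned MR from the pool's clean_list, if one is
 * available.  The per-CPU CLEAN_LIST_BUSY_BIT is set (with preemption
 * disabled) around llist_del_first() so that the flush path cannot re-add
 * the node we are in the middle of removing.
 */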
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long *flag;

        preempt_disable();
        flag = this_cpu_ptr(&clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = llist_del_first(&pool->clean_list);
        if (ret) {
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
        }

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}
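
/* Spin until no CPU is inside the llist_del_first() window of
 * rds_ib_reuse_mr().  Called before freshly cleaned MRs are pushed back
 * onto a clean_list.
 */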
static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}
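
/* DMA-sync an MR's scatterlist for CPU or device access, depending on the
 * direction of the pending transfer.
 */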
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
                                struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                llist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        if (pool->use_fastreg)
                rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
        else
                rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in llist_del_first.
                 *
                 * This is pretty unlikely, but just in case wait for an llist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret)
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

                /* more than one entry in llist nodes */
                if (clean_nodes->next)
                        llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return 0;
}
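
/* Get an MR for a new registration: first try the clean_list; if that is
 * empty and the pool is still below max_items, reserve a slot in item_count
 * and return NULL so the caller allocates a fresh MR; otherwise flush the
 * pool and retry.  After a few unsuccessful rounds the pool is reported as
 * depleted and ERR_PTR(-EAGAIN) is returned.
 */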
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;

        if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
                        return ibmr;

                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}
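
/* Freeing an MR does not release it immediately: it is parked on the pool's
 * free or drop list and a flush is scheduled, or run synchronously when the
 * caller asked for invalidation and we are not in interrupt context.
 */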
void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (rds_ibdev->use_fastreg)
                rds_ib_free_frmr_list(ibmr);
        else
                rds_ib_free_fmr_list(ibmr);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_mr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}
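
/* Flush the 8K and 1M MR pools of every known IB device. */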
void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                if (rds_ibdev->mr_8k_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

                if (rds_ibdev->mr_1m_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}
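
/* Transport entry point for registering a scatterlist as an RDMA MR.  Looks
 * up the IB device bound to the socket's source address, then registers the
 * pages through either the FRWR (fastreg) or FMR path.  Returns the
 * transport-private MR handle, or an ERR_PTR() on failure; *key_ret receives
 * the rkey handed back to userspace.
 */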
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret,
                    struct rds_connection *conn)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        struct rds_ib_connection *ic = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (conn)
                ic = conn->c_transport_data;

        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
        }

        if (rds_ibdev->use_fastreg)
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
        if (IS_ERR(ibmr)) {
                ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
        } else {
                return ibmr;
        }

 out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);

        return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}
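
/* Allocate and initialise one MR pool (8K or 1M message-size class) for
 * @rds_ibdev.  Pool limits come from the device's advertised MR capacities;
 * the MRs themselves are created lazily as registrations come in.
 */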
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_1m_mrs;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_8k_mrs;
        }

        pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
        pool->use_fastreg = rds_ibdev->use_fastreg;

        return pool;
}

int rds_ib_mr_init(void)
{
        rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
        if (!rds_ib_mr_wq)
                return -ENOMEM;

        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
        destroy_workqueue(rds_ib_mr_wq);
}