/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
		umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

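/*
 * Completion callback for asynchronous mkey creation (see add_keys()).
 * On success, assemble the mkey from the firmware-returned index and a
 * rotating per-device key byte, add the MR to its cache bucket and to
 * the mkey radix tree; on failure, free the MR and delay further cache
 * refills.
 */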
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

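/*
 * Asynchronously create 'num' mkeys for cache bucket 'c'. Completion is
 * handled in reg_mr_callback(). At most MAX_PENDING_REG_MR creations may
 * be outstanding per bucket; beyond that, -EAGAIN is returned.
 */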
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

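/*
 * Pop up to 'num' MRs from the head of bucket 'c' and destroy their
 * mkeys. The synchronize_srcu() below keeps ODP page-fault handlers
 * from seeing an MR that is about to be freed.
 */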
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

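/*
 * Per-bucket maintenance: grow a bucket back towards twice its limit one
 * mkey at a time, and shrink it as a low-priority garbage-collection
 * task (see the comment below) once it holds more than twice its limit.
 */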
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to be run
		 * when no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are
		 * user tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to try to run
		 * in the next cycle, in order to free CPU resources
		 * to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

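/*
 * Allocate an MR from a specific cache bucket, triggering a refill and
 * sleeping on the bucket's completion if it is currently empty.
 */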
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return ERR_PTR(-EINVAL);
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

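/*
 * Allocate an MR of the given order from the cache, falling back to the
 * larger buckets when the exact one is empty. Returns NULL (and bumps
 * the bucket's miss counter) if nothing suitable is cached.
 */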
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

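/*
 * Return a cache-allocated MR to its bucket. The mkey is first disabled
 * with a UMR operation so that it can be safely handed out again.
 */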
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;
		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

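/*
 * Set up the MR cache: an ordered workqueue, the delay timer used to
 * throttle refills after failures, and MAX_MR_CACHE_ENTRIES buckets of
 * increasing order (buckets above MR_CACHE_LAST_STD_ENTRY are reserved
 * for ODP).
 */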
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to initialize,
	 * so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0 ; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

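/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion is reported through mlx5_ib_umr_done().
 */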
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

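/*
 * Push an updated translation table (MTTs or KLMs) to the device in
 * MLX5_UMR_MTT_ALIGNMENT-sized chunks of UMR work requests. The scratch
 * buffer is allocated with decreasing ambition: up to MLX5_MAX_UMR_CHUNK,
 * then MLX5_SPARE_UMR_CHUNK, and finally the single emergency page.
 */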
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

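/*
 * Main entry point for user MR registration. The fast path takes a
 * pre-created mkey from the cache and initializes it with UMR work
 * requests; the slow path creates the mkey with a blocking firmware
 * command under slow_path_mutex.
 */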
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mr->live = 1;
#endif
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

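/*
 * Re-register a user MR. If the new translation still fits the existing
 * mkey (see use_umr_mtt_update()), the mkey is patched in place via UMR;
 * otherwise it is destroyed and recreated through reg_create().
 */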
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		mr->live = 1;
#endif
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;

err:
	if (mr->umem) {
		ib_umem_release(mr->umem);
		mr->umem = NULL;
	}
	clean_mr(dev, mr);
	return err;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}

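/*
 * Tear down an MR. For ODP MRs, all page mappings are destroyed and the
 * umem is released first, so that no invalidation can look at the MR
 * while it is being freed below.
 */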
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(dev, mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));

	return 0;
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

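/*
 * Map a scatterlist into the MR's descriptor list: KLM descriptors when
 * the MR uses KLM access mode, otherwise MTT pages collected through
 * mlx5_set_page().
 */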
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}