/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
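
/*
 * With CONFIG_MMU_NOTIFIER we register one i915_mmu_notifier per client mm
 * that owns userptr objects.  Notifiers live in a per-device hash table keyed
 * by the mm, and the userptr objects belonging to that mm sit in an interval
 * tree keyed by their user address range, so the invalidate_range_start()
 * callback can find every object overlapping the range being invalidated and
 * drop its page references before the backing pages go away.
 */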
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long serial;
	unsigned long count;
};
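
/*
 * One i915_mmu_object links a single userptr object into its notifier's
 * interval tree: it.start/it.last span [userptr.ptr, userptr.ptr + size - 1],
 * so an overlap query against the tree resolves directly to the GEM object.
 */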
struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct drm_i915_gem_object *obj;
};
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (start < end) {
		struct drm_i915_gem_object *obj;

		obj = NULL;
		spin_lock(&mn->lock);
		if (serial == mn->serial)
			it = interval_tree_iter_next(it, start, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		mutex_lock(&mn->dev->struct_mutex);
		/* Cancel any active worker and force us to re-evaluate gup */
		obj->userptr.work = NULL;

		if (obj->pages != NULL) {
			struct drm_i915_private *dev_priv = to_i915(mn->dev);
			struct i915_vma *vma, *tmp;
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
				int ret = i915_vma_unbind(vma);
				WARN_ON(ret && ret != -EIO);
			}

			WARN_ON(i915_gem_object_put_pages(obj));

			dev_priv->mm.interruptible = was_interruptible;
		}

		start = obj->userptr.ptr + obj->base.size;

		drm_gem_object_unreference(&obj->base);
		mutex_unlock(&mn->dev->struct_mutex);
	}
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 1;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}
static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}
static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter the function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and system-wide deadlock)
	 * between the mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}
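
/*
 * The notifier's serial number is bumped on every interval-tree insertion or
 * removal.  The invalidate callback remembers the serial it last walked with,
 * letting it continue with interval_tree_iter_next() while the tree is
 * unchanged and restart from interval_tree_iter_first() after a modification.
 */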
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}
static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	/* Disallow overlapping userptr objects */
	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
	} else {
		interval_tree_insert(&mn->it, &mmu->objects);
		__i915_mmu_notifier_update_serial(mmu);
		ret = 0;
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
	kfree(mn);
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	/* Without mmu-notifiers only the unsynchronized mode can be offered */
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

#endif
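
/*
 * Acquiring the user pages may require mmap_sem, which we cannot take while
 * holding struct_mutex (see the lock-ordering note above), so the slow path
 * defers get_user_pages() to a workqueue item.  A get_pages_work carries
 * everything the worker needs: the object, the originating task (so the
 * pages are resolved against the right mm), and the work item itself.
 */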
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#define I915_GEM_USERPTR_MAX_WORKERS 15
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
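
/*
 * When swiotlb may bounce the DMA, st_set_pages() below builds one sg entry
 * per page rather than letting sg_alloc_table_from_pages() coalesce adjacent
 * pages, presumably to keep every segment within the bounce-buffer limits.
 */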
static int
st_set_pages(struct sg_table **st, struct vm_page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}
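
/*
 * Releasing the pages undoes st_set_pages(): each page is dirtied (only if
 * the object is still marked WILLNEED), marked accessed and returned to the
 * page cache, and the sg_table is freed.
 */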
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};
/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It cannot overlap any other userptr object in the same address space.
 * 3. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 4. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 5. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
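
/*
 * For illustration, a minimal userspace sketch of driving this ioctl through
 * libdrm; "fd" and SIZE are placeholders, error handling is omitted, and the
 * request struct and ioctl number come from the i915 uAPI header:
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, SIZE);	// rule 1: page-aligned ptr and size
 *	arg.user_ptr = (__u64)(uintptr_t)ptr;
 *	arg.user_size = SIZE;
 *	arg.flags = 0;				// synchronised (mmu-notifier) mode
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use(arg.handle);		// hypothetical consumer of the new GEM handle
 */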
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}