drm/i915: Update to Linux 3.16
[dragonfly.git] / sys / dev / drm / i915 / i915_gem_userptr.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

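/*
 * One i915_mmu_notifier is created per struct mm_struct that owns userptr
 * objects; it is looked up via dev_priv->mmu_notifiers, keyed by the mm
 * pointer. Each userptr object is tracked as an i915_mmu_object in the
 * notifier's interval tree (keyed by its user address range); "count" acts
 * as a reference count of tracked objects, and "serial" is bumped whenever
 * the tree changes so that walkers can detect staleness.
 */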
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long count;
	unsigned long serial;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct drm_i915_gem_object *obj;
};

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (start < end) {
		struct drm_i915_gem_object *obj;

		obj = NULL;
		spin_lock(&mn->lock);
		if (serial == mn->serial)
			it = interval_tree_iter_next(it, start, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		mutex_lock(&mn->dev->struct_mutex);
		/* Cancel any active worker and force us to re-evaluate gup */
		obj->userptr.work = NULL;

		if (obj->pages != NULL) {
			struct drm_i915_private *dev_priv = to_i915(mn->dev);
			struct i915_vma *vma, *tmp;
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
				int ret = i915_vma_unbind(vma);
				WARN_ON(ret && ret != -EIO);
			}

			WARN_ON(i915_gem_object_put_pages(obj));

			dev_priv->mm.interruptible = was_interruptible;
		}

		start = obj->userptr.ptr + obj->base.size;

		drm_gem_object_unreference(&obj->base);
		mutex_unlock(&mn->dev->struct_mutex);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 0;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}

static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}

static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter the function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and system-wide deadlock)
	 * between the mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}

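/*
 * The serial is advanced whenever the interval tree is modified, skipping 0
 * so that a walker can use "serial == 0" as its initial, never-matching
 * value. invalidate_range_start() above compares its cached serial against
 * mmu->serial after reacquiring the spinlock: if they still match, the tree
 * is unchanged and the iterator may simply be advanced; otherwise the lookup
 * restarts from the beginning of the invalidated range.
 */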
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}

static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	/* Disallow overlapping userptr objects */
	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
	} else {
		interval_tree_insert(&mn->it, &mmu->objects);
		__i915_mmu_notifier_update_serial(mmu);
		ret = 0;
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}

#else

#if 0
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	return 0;
}
#endif
#endif

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif

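/*
 * When swiotlb is active, st_set_pages() below builds the sg_table with one
 * page per entry instead of letting sg_alloc_table_from_pages() coalesce
 * contiguous pages; presumably this keeps each segment within what the
 * swiotlb bounce buffers can handle.
 */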
#if 0
static int
st_set_pages(struct sg_table **st, struct vm_page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It cannot overlap any other userptr object in the same address space.
 * 3. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 4. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 5. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

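/*
 * Illustrative sketch (not part of this file's build): from userspace, the
 * ioctl described above would be driven roughly as follows, assuming a DRM
 * file descriptor "fd", a page-aligned buffer "ptr" of page-multiple "size",
 * and drmIoctl()/DRM_IOCTL_I915_GEM_USERPTR as provided by libdrm and
 * i915_drm.h:
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;	// GEM handle wrapping the user pages
 *
 * Per the checks above, an unaligned ptr/size yields -EINVAL, a range larger
 * than the GTT yields -E2BIG, and I915_USERPTR_READ_ONLY currently yields
 * -ENODEV.
 */
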
#endif

int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}