/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#include "opt_vm.h"

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

#include <vm/vm_page2.h>

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

#define TTM_BO_VM_NUM_PREFAULT 16
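
/*
 * RB-tree comparison callback: buffer objects are ordered by the start
 * offset of their VM node in the device address space.
 */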
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{
        if (a->vm_node->start < b->vm_node->start) {
                return (-1);
        } else if (a->vm_node->start > b->vm_node->start) {
                return (1);
        } else {
                return (0);
        }
}
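
/*
 * Look up the buffer object whose VM node covers the page range
 * [page_start, page_start + num_pages); returns NULL when no object
 * fully contains the requested range.
 */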
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
    unsigned long page_start, unsigned long num_pages)
{
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        bo = RB_ROOT(&bdev->addr_space_rb);
        while (bo != NULL) {
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                        bo = RB_RIGHT(bo, vm_rb);
                } else
                        bo = RB_LEFT(bo, vm_rb);
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
            (page_start + num_pages)))
                return NULL;

        return best_bo;
}
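
/*
 * cdev pager fault handler: reserve the buffer object, wait for any
 * pipelined move to complete, make sure the backing storage is
 * populated, and hand back the page with caching attributes that
 * match the buffer placement.
 */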
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
        struct ttm_buffer_object *bo = vm_obj->handle;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_tt *ttm = NULL;
        vm_page_t m, m1, oldm;
        int ret;
        int retval = VM_PAGER_OK;
        struct ttm_mem_type_manager *man =
            &bdev->man[bo->mem.mem_type];

        vm_object_pip_add(vm_obj, 1);
        oldm = *mres;
        if (oldm != NULL) {
                vm_page_remove(oldm);
                *mres = NULL;
        } else
                oldm = NULL;
retry:
        VM_OBJECT_UNLOCK(vm_obj);
        m = NULL;

reserve:
        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY) {
                        lwkt_yield();
                        goto reserve;
                }
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                case -EINTR:
                        lwkt_yield();
                        goto reserve;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                /*
                 * Here, the behavior differs between Linux and FreeBSD.
                 *
                 * On Linux, the wait is interruptible (3rd argument to
                 * ttm_bo_wait). There must be some mechanism to resume
                 * page fault handling, once the signal is processed.
                 *
                 * On FreeBSD, the wait is uninterruptible. This is not a
                 * problem as we can't end up with an unkillable process
                 * here, because the wait will eventually time out.
                 *
                 * An example of this situation is the Xorg process
                 * which uses SIGALRM internally. The signal could
                 * interrupt the wait, causing the page fault to fail
                 * and the process to receive SIGSEGV.
                 */
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (unlikely(ret != 0)) {
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        } else
                lockmgr(&bdev->fence_lock, LK_RELEASE);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by
         * the bo->mutex, as we should be the only writers.
         * There shouldn't really be any readers of these bits except
         * within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (!bo->mem.bus.is_iomem) {
                /* Allocate all pages at once, the most common usage */
                ttm = bo->ttm;
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
        }

        if (bo->mem.bus.is_iomem) {
                m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
                    bo->mem.bus.offset + offset);
                pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
        } else {
                ttm = bo->ttm;
                m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
                pmap_page_set_memattr(m,
                    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
        }

        VM_OBJECT_LOCK(vm_obj);
        if ((m->busy_count & PBUSY_LOCKED) != 0) {
#if 0
                vm_page_sleep(m, "ttmpbs");
#endif
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
        }
        m->valid = VM_PAGE_BITS_ALL;
        *mres = m;
        m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
        if (m1 == NULL) {
                vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
        } else {
                KASSERT(m == m1,
                    ("inconsistent insert bo %p m %p m1 %p offset %jx",
                    bo, m, m1, (uintmax_t)offset));
        }
        vm_page_busy_try(m, FALSE);

        if (oldm != NULL) {
                vm_page_free(oldm);
        }

out_io_unlock1:
        ttm_mem_io_unlock(man);
out_unlock1:
        ttm_bo_unreserve(bo);
        vm_object_pip_wakeup(vm_obj);
        return (retval);

out_io_unlock:
        VM_OBJECT_LOCK(vm_obj);
        goto out_io_unlock1;

out_unlock:
        VM_OBJECT_LOCK(vm_obj);
        goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

        /*
         * On Linux, a reference to the buffer object is acquired here.
         * The reason is that this function is not called when the
         * mmap() is initialized, but only when a process forks for
         * instance. Therefore on Linux, the reference on the bo is
         * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
         * then released in ttm_bo_vm_close().
         *
         * Here, this function is called during mmap() initialization.
         * Thus, the reference acquired in ttm_bo_mmap_single() is
         * sufficient.
         */
        *color = 0;
        return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
        struct ttm_buffer_object *bo = handle;

        ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
        .cdev_pg_fault = ttm_bo_vm_fault,
        .cdev_pg_ctor = ttm_bo_vm_ctor,
        .cdev_pg_dtor = ttm_bo_vm_dtor
};
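
/*
 * Create the device-pager VM object backing an mmap() of a buffer
 * object. The bo reference taken here is handed over to vm_obj->handle
 * and dropped again by ttm_bo_vm_dtor() when the object goes away.
 */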
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        struct vm_object *vm_obj;
        int ret;

        *obj_res = NULL;

        lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
        bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
        if (likely(bo != NULL))
                kref_get(&bo->kref);
        lockmgr(&bdev->vm_lock, LK_RELEASE);

        if (unlikely(bo == NULL)) {
                pr_err("Could not find buffer object to map\n");
                return (EINVAL);
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = EPERM;
                goto out_unref;
        }
        ret = -driver->verify_access(bo);
        if (unlikely(ret != 0))
                goto out_unref;

        vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
            size, nprot, 0, curthread->td_ucred);

        if (vm_obj == NULL) {
                ret = EINVAL;
                goto out_unref;
        }
        /*
         * Note: We're transferring the bo reference to vm_obj->handle here.
         */
        *offset = 0;
        *obj_res = vm_obj;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
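
/*
 * Tear down the device-pager mapping of a buffer object: free every
 * page still owned by the VM object and drop the pager reference.
 */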
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
        vm_object_t vm_obj;
        vm_page_t m;
        int i;

        vm_obj = cdev_pager_lookup(bo);
        if (vm_obj == NULL)
                return;

        VM_OBJECT_LOCK(vm_obj);
        for (i = 0; i < bo->num_pages; i++) {
                m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
                if (m == NULL)
                        continue;
                cdev_pager_free_page(vm_obj, m);
        }
        VM_OBJECT_UNLOCK(vm_obj);

        vm_object_deallocate(vm_obj);
}

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif