/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 *$FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

#include <vm/vm_page2.h>

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

#define TTM_BO_VM_NUM_PREFAULT 16
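
/*
 * Comparator for the RB_GENERATE() tree above: buffer objects are ordered
 * by the starting page offset of their address-space node.
 */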
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{
	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}
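
/*
 * Walk the device's address-space RB tree and return the buffer object
 * whose node covers [page_start, page_start + num_pages), or NULL when no
 * single object spans the whole range.
 */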
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
    unsigned long page_start,
    unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else {
			bo = RB_LEFT(bo, vm_rb);
		}
	}

	if (unlikely(best_bo == NULL))
		return (NULL);

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
	    (page_start + num_pages)))
		return (NULL);

	return (best_bo);
}
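
/*
 * cdev pager fault handler.  Resolves a page fault on a mapped buffer
 * object: the bo is reserved, any pending (pipelined) move is waited for,
 * and the backing page is returned through *mres -- a fictitious page for
 * iomem placements, or a populated ttm_tt page otherwise.
 */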
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_remove(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			lwkt_yield();
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			lwkt_yield();
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_LOCK(vm_obj);
	if ((m->busy_count & PBUSY_LOCKED) != 0) {
		vm_page_sleep(m, "ttmpbs");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy_try(m, FALSE);

	if (oldm != NULL)
		vm_page_free(oldm);

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}
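
/*
 * cdev pager constructor, called when the pager object backing an mmap()
 * of a buffer object is set up.
 */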
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}
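
/*
 * Pager operations wired into the OBJT_MGTDEVICE object allocated by
 * ttm_bo_mmap_single() below.
 */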
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};
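
/*
 * Map a buffer object into a process address space.  A driver's mmap
 * entry point is expected to translate its device offset/size arguments
 * and forward them here.  A minimal, purely illustrative sketch (the
 * wrapper name and the bdev lookup are hypothetical, not part of this
 * file):
 *
 *	static int
 *	mydrv_mmap_single(vm_ooffset_t *offset, vm_size_t size,
 *	    struct vm_object **obj, int nprot)
 *	{
 *		struct ttm_bo_device *bdev = mydrv_get_ttm_bdev();
 *
 *		return (ttm_bo_mmap_single(bdev, offset, size, obj, nprot));
 *	}
 *
 * On success, *obj_res holds a new OBJT_MGTDEVICE object whose handle
 * owns the reference taken on the bo below.
 */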
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL)) {
		pr_err("Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}

	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return (0);

out_unref:
	ttm_bo_unref(&bo);
	return (ret);
}
EXPORT_SYMBOL(ttm_bo_mmap);
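
/*
 * Tear down any pager pages still mapping the buffer object; called when
 * the bo is about to be released so that no stale mappings outlive it.
 */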
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
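
/*
 * Linux-style helper for mapping a buffer object behind an fbdev
 * emulation; it installs the TTM vm_operations on the vma and takes a
 * reference on the bo rather than going through the cdev pager path above.
 */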
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);