/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

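/*
 * ttm_bo_move_ttm - move a buffer between the system placement and a
 * translation-table (bound) placement by unbinding/rebinding its TTM and
 * adjusting the caching state; no data copy is performed.
 */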
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

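/*
 * ttm_mem_io_lock/ttm_mem_io_unlock - serialize access to a memory type's
 * I/O reservation state. The fastpath check skips the lock entirely for
 * managers that do not maintain an io_reserve LRU.
 */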
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible) {
                if (lockmgr(&man->io_reserve_mutex,
                            LK_EXCLUSIVE | LK_SLEEPFAIL))
                        return (-EINTR);
                else
                        return (0);
        }

        lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

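/*
 * Unmap the buffer object at the head of the manager's io_reserve LRU so
 * that its aperture space can be reused by a new reservation.
 */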
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

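/*
 * Ask the driver to reserve an I/O region for @mem. If the driver reports
 * -EAGAIN, evict an existing mapping from the io_reserve LRU and retry.
 */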
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

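/*
 * ttm_mem_io_reserve_vm/ttm_mem_io_free_vm - per-object helpers used by the
 * VM fault path: reserve or release the I/O region backing bo->mem and keep
 * the object on the manager's io_reserve LRU while it is mapped.
 */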
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

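/*
 * Map a whole memory region into kernel virtual address space for a CPU
 * copy, reusing a driver-premapped address when one is available.
 */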
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
                    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

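/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): iomem to iomem, iomem to
 * a TTM page, and a TTM page to iomem.
 */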
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                /* iowrite32(ioread32(srcP++), dstP++); */
                *dstP++ = *srcP++;
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

        /* XXXKIB can't sleep ? */
        dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

        return 0;
}

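/*
 * Fallback move path: map both the old and the new region and copy the
 * contents page by page with the CPU, then release the old node.
 */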
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(old_mem->placement);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(new_mem->placement);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
out2:
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
        kfree(bo, M_DRM);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        /*
         * Mirror ref from kref_init() for list_kref.
         */
        set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

        *new_obj = fbo;
        return 0;
}

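/*
 * Translate TTM caching flags into a machine memory attribute for a
 * kernel mapping.
 */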
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                return (VM_MEMATTR_WRITE_COMBINING);
        else
                /*
                 * We do not support i386, look at the linux source
                 * for the reason of the comment.
                 */
                return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

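/*
 * kmap helper for objects whose current placement is iomem: either reuse
 * the driver-premapped address or create a device mapping for the range.
 */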
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
                    bo->mem.bus.offset + offset, size,
                    (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                map->size = size;
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

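/*
 * kmap helper for objects backed by system pages. A single cached page is
 * mapped through an sf_buf; anything else is mapped into the kernel map
 * with the requested memory attribute.
 */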
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        vm_memattr_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int i, ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->sf = sf_buf_alloc(map->page);
                map->virtual = (void *)sf_buf_kva(map->sf);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        VM_MEMATTR_WRITE_COMBINING :
                        ttm_io_prot(mem->placement);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->num_pages = num_pages;
                map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
                    num_pages * PAGE_SIZE, PAGE_SIZE);
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                pmap_page_set_memattr(ttm->pages[start_page +
                                    i], prot);
                        }
                        pmap_qenter((vm_offset_t)map->virtual,
                            &ttm->pages[start_page], num_pages);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

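/*
 * Map part of a buffer object into kernel address space. A minimal,
 * hypothetical caller sketch (the bo, range and error handling below are
 * illustrative assumptions, not code from this file):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the contents through 'virtual' ...
 *		ttm_bo_kunmap(&map);
 *	}
 */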
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

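/*
 * Tear down a mapping created by ttm_bo_kmap() and drop the associated
 * I/O reservation.
 */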
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                pmap_unmapdev((vm_offset_t)map->virtual, map->size);
                break;
        case ttm_bo_map_vmap:
                pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
                kmem_free(&kernel_map, (vm_offset_t)map->virtual,
                    map->num_pages * PAGE_SIZE);
                break;
        case ttm_bo_map_kmap:
                sf_buf_free(map->sf);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
        map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

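/*
 * Finish an accelerated (GPU) move: fence the buffer with @sync_obj and
 * either wait for idle (evictions) or hand the old storage to a ghost
 * object that is released once the fence signals.
 */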
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);