/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/wait.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
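
/*
 * Illustrative sketch (not part of the original file): drivers usually try
 * an accelerated (GPU) copy in their ttm_bo_driver.move callback and fall
 * back to the CPU path above when that is not possible. The driver name and
 * the blit helper below are hypothetical; the callback shape follows the
 * signatures used in this version of TTM.
 */
#if 0
static int
exampledrv_bo_move(struct ttm_buffer_object *bo, bool evict,
		   bool interruptible, bool no_wait_gpu,
		   struct ttm_mem_reg *new_mem)
{
	int ret;

	/* Hypothetical hardware blit; assume it fails when unusable. */
	ret = exampledrv_move_blit(bo, evict, no_wait_gpu, new_mem);
	if (ret == 0)
		return 0;

	/* Fall back to the CPU copy implemented above. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
#endif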
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support i386, look at the linux source
		 * for the reason of the comment.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_COMBINING :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual =
		    (void *)kmem_alloc_nofault(&kernel_map,
					       num_pages * PAGE_SIZE,
					       VM_SUBSYS_DRM_TTM,
					       PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
				    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
			  map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
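
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * kmap API above. A caller maps a reserved, populated buffer object, touches
 * it through the returned kernel virtual address, and unmaps it again.
 * ttm_kmap_obj_virtual() comes from ttm_bo_api.h; the helper name below is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int
example_clear_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	/* Map the whole object; bails out if reservation/ioremap fails. */
	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io(virtual, 0, bo->num_pages << PAGE_SHIFT);
	else
		memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

	/* Undo the mapping and drop the io reservation taken by kmap. */
	ttm_bo_kunmap(&map);
	return 0;
}
#endif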
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
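
/*
 * Illustrative sketch (not part of the original file): how a driver's
 * accelerated move path would normally end. After queueing the GPU copy it
 * obtains a sync object (fence) for that copy and hands it to
 * ttm_bo_move_accel_cleanup() above, which either waits (eviction case) or
 * hangs the old memory on a ghost object until the fence signals. The
 * driver name and the copy helper below are hypothetical.
 */
#if 0
static int
exampledrv_move_blit(struct ttm_buffer_object *bo, bool evict,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	void *fence;
	int ret;

	/* Hypothetical: queue the copy on the GPU and get a fence back. */
	ret = exampledrv_copy_buffer(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	/* Let TTM handle the old placement once the fence signals. */
	return ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
					 new_mem);
}
#endif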