/*
 * ttm: Return -ERESTART when a signal interrupts bo eviction.
 */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}
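
/*
 * Final release of a buffer object, called once list_kref drops to
 * zero. At this point the object must be idle, off all LRU lists and
 * without a memory node; the BUG_ON()s below assert exactly that.
 */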
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}
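
/*
 * Wait for a buffer object to become unreserved. In the interruptible
 * case a pending signal makes the wait return -ERESTART so that the
 * interrupted operation can be retried once the signal has been
 * handled.
 */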
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}
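
/*
 * Reserve a buffer object with the lru_lock already held. When
 * multi-object reservation is in use (use_sequence), a buffer already
 * reserved within the same validation sequence makes this return
 * -EAGAIN, so the caller can back off and retry; this is what avoids
 * deadlocks between concurrent reservers.
 */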
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);
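
/*
 * A typical driver-side reserve/validate/unreserve sequence (an
 * illustrative sketch only; the placement argument and error handling
 * are driver-specific):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_buffer_object_validate(bo, placement, true, false);
 *	ttm_bo_unreserve(bo);
 */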
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call with bo->mutex held.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			return ret;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}
	}
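
	/*
	 * A system-memory destination was handled above without a copy.
	 * Anything else needs a real move: ttm_bo_move_ttm() when both
	 * memory types are unfixed (pageable), the driver's move hook
	 * when one is provided, and a CPU memcpy as the fallback.
	 */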
	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}
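
/*
 * The delayed-destroy work re-arms itself roughly every 10 ms
 * (HZ / 100, clamped to at least one jiffy) for as long as
 * not-yet-idle buffers remain on the ddestroy list.
 */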
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
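
/*
 * Evict a buffer object out of @mem_type. The buffer is first idled,
 * which may be interrupted by a signal (propagating -ERESTART), then
 * moved to a placement chosen by the driver's evict_flags hook, with
 * system memory as the fallback.
 */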
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
					    false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
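
/*
 * Check whether a memory type is usable for the requested placement
 * mask and, if so, compute the resulting placement flags: the memory
 * type bit plus the best caching mode that both the mask and the
 * manager allow.
 */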
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t mask, uint32_t *res_mask)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((mask & man->available_caching) == 0)
		return false;
	if (mask & man->default_caching)
		cur_flags |= man->default_caching;
	else if (mask & TTM_PL_FLAG_CACHED)
		cur_flags |= TTM_PL_FLAG_CACHED;
	else if (mask & TTM_PL_FLAG_WC)
		cur_flags |= TTM_PL_FLAG_WC;
	else
		cur_flags |= TTM_PL_FLAG_UNCACHED;

	*res_mask = cur_flags;
	return true;
}
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
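
/*
 * Wait until no CPU writers (see ttm_bo_synccpu_write_grab()) hold the
 * buffer. Note the -ERESTARTSYS to -ERESTART translation: -ERESTART is
 * the value TTM uses internally for signal-interrupted waits.
 */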
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->cpu_writers) == 0);

	if (ret == -ERESTARTSYS)
		ret = -ERESTART;

	return ret;
}
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}
static int ttm_bo_mem_compat(uint32_t proposed_placement,
			     struct ttm_mem_reg *mem)
{
	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
		return 0;
	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
		return 0;

	return 1;
}
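
/*
 * Illustrative use of validate from a driver (a sketch only;
 * TTM_PL_FLAG_VRAM stands in for whatever placement the driver
 * actually wants, and the bo must already be reserved):
 *
 *	ret = ttm_buffer_object_validate(bo,
 *					 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 *					 true, false);
 */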
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move the buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}

	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags.
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}
	return 0;
}
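
/*
 * Initialize a buffer object and validate it into its first placement.
 * On success the object is returned unreserved; on failure it is
 * unreserved and unreferenced, so the caller must not touch it again.
 */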
int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return bdev->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;

	size_t acc_size =
	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size, false);
		return -ENOMEM;
	}

	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
				     page_alignment, buffer_start,
				     interruptible,
				     persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
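
/*
 * Example of creating a pageable, cacheable 64 KiB kernel bo (a sketch
 * only; real drivers pick type, flags and alignment to match their
 * hardware):
 *
 *	struct ttm_buffer_object *bo = NULL;
 *	int ret = ttm_buffer_object_create(bdev, 65536, ttm_bo_type_kernel,
 *					   TTM_PL_FLAG_SYSTEM |
 *					   TTM_PL_FLAG_CACHED,
 *					   0, 0, false, NULL, &bo);
 */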
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&bdev->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&bdev->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
/**
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	bdev->ttm_bo_extra_size =
	    ttm_round_pot(sizeof(struct ttm_tt)) +
	    ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
	    ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
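
/*
 * Wait for the buffer's sync object to signal, dropping bo->lock while
 * sleeping. Because the lock is dropped, the sync object is re-checked
 * after the wait: only if it is still the one we waited on is it
 * cleared from the buffer.
 */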
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
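
/*
 * Illustrative pairing of the synccpu calls around CPU access (a
 * sketch; the mapping itself is done elsewhere, e.g. via ttm_bo_kmap):
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... CPU writes to the buffer ...
 *	ttm_bo_synccpu_write_release(bo);
 */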
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the ttm_bo_device::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_device *bdev =
	    container_of(shrink, struct ttm_bo_device, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&bdev->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&bdev->swap_lru))) {
			spin_unlock(&bdev->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&bdev->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&bdev->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->shrink) == 0)
		;
}