/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
/*
 * Unreserve all buffers on @list, putting any that were taken off the
 * LRU lists back on them. Called with the global LRU lock held.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}
/*
 * Take all reserved buffers on @list off the LRU lists, recording in
 * @put_count how many list references each buffer must drop later.
 * Called with the global LRU lock held.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}
/*
 * Drop the buffer references deferred by ttm_eu_del_from_lru_locked().
 * Called without the global LRU lock held.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}
/*
 * Sleep until @bo becomes unreserved, temporarily dropping the global
 * LRU lock. On error, the reservations on @list are backed off again.
 */
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		ttm_bo_unreserve(bo);
	}
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders. A usage sketch follows the function
 * below.
 */
int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * No deadlock risk: keep our other reservations,
			 * wait for this buffer and retry it.
			 */
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			/*
			 * Deadlock avoidance: we hold the newer validation
			 * sequence, so back everything off, wait for the
			 * buffer and start over.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/*
			 * The buffer is wanted by the CPU: back off, wait
			 * for CPU access to finish and start over.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
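
/*
 * A minimal usage sketch, not part of the original file: how a driver
 * might typically drive these helpers. The names my_validate(),
 * my_submit(), my_next_val_seq() and my_create_fence() are hypothetical
 * driver-side helpers, and bo is assumed to point at an initialized
 * struct ttm_buffer_object; only the ttm_eu_*() calls and
 * struct ttm_validate_buffer are real TTM interfaces.
 *
 *	struct ttm_validate_buffer val_buf;
 *	LIST_HEAD(val_list);
 *	int ret;
 *
 *	val_buf.bo = bo;
 *	val_buf.new_sync_obj_arg = NULL;	// driver-private fence arg
 *	list_add_tail(&val_buf.head, &val_list);
 *
 *	// On error, no buffer on the list remains reserved.
 *	ret = ttm_eu_reserve_buffers(&val_list, my_next_val_seq());
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate(&val_list);		// driver placement/validation
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&val_list);
 *		return ret;
 *	}
 *
 *	my_submit();				// queue the GPU commands
 *	ttm_eu_fence_buffer_objects(&val_list, my_create_fence());
 */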
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		struct ttm_bo_driver *driver = bo->bdev->driver;
		void *old_sync_obj;

		spin_lock(&bo->lock);
		old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		spin_unlock(&bo->lock);
		ttm_bo_unreserve(bo);
		entry->reserved = false;
		if (old_sync_obj)
			driver->sync_obj_unref(&old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);